From 36f07146d4ce09858f47fda17053eed8bcf86fc5 Mon Sep 17 00:00:00 2001
From: Heming Han
Date: Mon, 11 Sep 2023 00:44:22 -0700
Subject: [PATCH] add netlibcni

---
 agent/go.mod | 26 +-
 agent/go.sum | 707 +-
 .../Microsoft/go-winio/.gitattributes | 1 +
 .../github.com/Microsoft/go-winio/.gitignore | 9 +
 .../Microsoft/go-winio/.golangci.yml | 144 +
 .../github.com/Microsoft/go-winio/README.md | 72 +-
 .../github.com/Microsoft/go-winio/SECURITY.md | 41 +
 .../github.com/Microsoft/go-winio/backup.go | 48 +-
 .../github.com/Microsoft/go-winio/doc.go | 22 +
 .../github.com/Microsoft/go-winio/ea.go | 8 +-
 .../github.com/Microsoft/go-winio/file.go | 66 +-
 .../github.com/Microsoft/go-winio/fileinfo.go | 29 +-
 .../github.com/Microsoft/go-winio/hvsock.go | 345 +-
 .../go-winio/internal/socket/rawaddr.go | 20 +
 .../go-winio/internal/socket/socket.go | 179 +
 .../internal/socket/zsyscall_windows.go | 72 +
 .../github.com/Microsoft/go-winio/pipe.go | 124 +-
 .../Microsoft/go-winio/pkg/guid/guid.go | 16 +-
 .../go-winio/pkg/guid/guid_nonwindows.go | 1 +
 .../go-winio/pkg/guid/guid_windows.go | 3 +
 .../go-winio/pkg/guid/variant_string.go | 27 +
 .../Microsoft/go-winio/privilege.go | 32 +-
 .../github.com/Microsoft/go-winio/reparse.go | 11 +-
 .../github.com/Microsoft/go-winio/sd.go | 64 +-
 .../github.com/Microsoft/go-winio/syscall.go | 4 +-
 .../github.com/Microsoft/go-winio/tools.go | 5 +
 .../Microsoft/go-winio/zsyscall_windows.go | 45 +-
 .../hcsshim/osversion/windowsbuilds.go | 12 +
 .../vendor/github.com/cilium/ebpf/.gitignore | 1 +
 .../github.com/cilium/ebpf/.golangci.yaml | 1 -
 .../github.com/cilium/ebpf/ARCHITECTURE.md | 10 +-
 .../github.com/cilium/ebpf/CONTRIBUTING.md | 4 +-
 .../github.com/cilium/ebpf/MAINTAINERS.md | 8 +
 agent/vendor/github.com/cilium/ebpf/Makefile | 88 +-
 agent/vendor/github.com/cilium/ebpf/README.md | 69 +-
 .../vendor/github.com/cilium/ebpf/asm/func.go | 47 +
 .../github.com/cilium/ebpf/asm/func_string.go | 46 +-
 .../github.com/cilium/ebpf/asm/instruction.go | 553 +-
 .../vendor/github.com/cilium/ebpf/asm/jump.go | 74 +-
 .../github.com/cilium/ebpf/asm/metadata.go | 80 +
 .../github.com/cilium/ebpf/asm/opcode.go | 116 +-
 .../cilium/ebpf/asm/opcode_string.go | 18 +-
 .../github.com/cilium/ebpf/asm/register.go | 1 +
 .../cilium/ebpf/attachtype_string.go | 65 +
 .../vendor/github.com/cilium/ebpf/btf/btf.go | 897 +++
 .../ebpf/{internal => }/btf/btf_types.go | 93 +-
 .../{internal => }/btf/btf_types_string.go | 0
 .../cilium/ebpf/{internal => }/btf/core.go | 485 +-
 .../cilium/ebpf/{internal => }/btf/doc.go | 3 -
 .../github.com/cilium/ebpf/btf/ext_info.go | 721 ++
 .../github.com/cilium/ebpf/btf/format.go | 319 +
 .../github.com/cilium/ebpf/btf/handle.go | 121 +
 .../github.com/cilium/ebpf/btf/strings.go | 128 +
 .../github.com/cilium/ebpf/btf/types.go | 1212 ++++
 .../github.com/cilium/ebpf/collection.go | 692 +-
 agent/vendor/github.com/cilium/ebpf/doc.go | 9 +
 .../github.com/cilium/ebpf/elf_reader.go | 770 ++-
 .../github.com/cilium/ebpf/elf_reader_fuzz.go | 21 -
 agent/vendor/github.com/cilium/ebpf/info.go | 158 +-
 .../github.com/cilium/ebpf/internal/align.go | 6 +
 .../cilium/ebpf/internal/btf/btf.go | 799 ---
 .../cilium/ebpf/internal/btf/ext_info.go | 303 -
 .../cilium/ebpf/internal/btf/fuzz.go | 49 -
 .../cilium/ebpf/internal/btf/strings.go | 60 -
 .../cilium/ebpf/internal/btf/types.go | 893 ---
 .../github.com/cilium/ebpf/internal/cpu.go | 4 +-
 .../github.com/cilium/ebpf/internal/elf.go | 34 +
 .../github.com/cilium/ebpf/internal/endian.go | 29 -
 .../cilium/ebpf/internal/endian_be.go | 13 +
.../cilium/ebpf/internal/endian_le.go | 13 + .../github.com/cilium/ebpf/internal/errors.go | 203 +- .../github.com/cilium/ebpf/internal/fd.go | 69 - .../cilium/ebpf/internal/feature.go | 10 +- .../github.com/cilium/ebpf/internal/io.go | 48 +- .../github.com/cilium/ebpf/internal/output.go | 84 + .../cilium/ebpf/internal/pinning.go | 43 +- .../github.com/cilium/ebpf/internal/ptr_64.go | 14 - .../cilium/ebpf/internal/sys/doc.go | 6 + .../github.com/cilium/ebpf/internal/sys/fd.go | 96 + .../cilium/ebpf/internal/{ => sys}/ptr.go | 9 +- .../ebpf/internal/{ => sys}/ptr_32_be.go | 3 +- .../ebpf/internal/{ => sys}/ptr_32_le.go | 3 +- .../cilium/ebpf/internal/sys/ptr_64.go | 14 + .../cilium/ebpf/internal/sys/syscall.go | 126 + .../cilium/ebpf/internal/sys/types.go | 1052 +++ .../cilium/ebpf/internal/syscall.go | 245 - .../cilium/ebpf/internal/syscall_string.go | 56 - .../cilium/ebpf/internal/unix/types_linux.go | 38 +- .../cilium/ebpf/internal/unix/types_other.go | 29 +- .../github.com/cilium/ebpf/internal/vdso.go | 150 + .../cilium/ebpf/internal/version.go | 83 +- .../github.com/cilium/ebpf/link/cgroup.go | 14 +- .../github.com/cilium/ebpf/link/iter.go | 39 +- .../github.com/cilium/ebpf/link/kprobe.go | 250 +- .../github.com/cilium/ebpf/link/link.go | 224 +- .../github.com/cilium/ebpf/link/netns.go | 28 +- .../github.com/cilium/ebpf/link/perf_event.go | 265 +- .../github.com/cilium/ebpf/link/program.go | 10 +- .../cilium/ebpf/link/raw_tracepoint.go | 60 +- .../cilium/ebpf/link/socket_filter.go | 40 + .../github.com/cilium/ebpf/link/syscalls.go | 125 +- .../github.com/cilium/ebpf/link/tracepoint.go | 39 +- .../github.com/cilium/ebpf/link/tracing.go | 141 + .../github.com/cilium/ebpf/link/uprobe.go | 262 +- .../vendor/github.com/cilium/ebpf/link/xdp.go | 54 + agent/vendor/github.com/cilium/ebpf/linker.go | 276 +- agent/vendor/github.com/cilium/ebpf/map.go | 602 +- .../github.com/cilium/ebpf/marshalers.go | 85 +- agent/vendor/github.com/cilium/ebpf/prog.go | 611 +- .../github.com/cilium/ebpf/run-tests.sh | 70 +- .../vendor/github.com/cilium/ebpf/syscalls.go | 410 +- agent/vendor/github.com/cilium/ebpf/types.go | 42 +- .../github.com/cilium/ebpf/types_string.go | 70 +- .../coreos/go-systemd/v22/dbus/dbus.go | 5 + .../coreos/go-systemd/v22/dbus/methods.go | 34 + .../vendor/github.com/godbus/dbus/v5/auth.go | 10 +- .../vendor/github.com/godbus/dbus/v5/conn.go | 27 +- .../github.com/godbus/dbus/v5/conn_other.go | 9 +- .../vendor/github.com/godbus/dbus/v5/dbus.go | 14 +- agent/vendor/github.com/godbus/dbus/v5/doc.go | 10 +- .../github.com/godbus/dbus/v5/escape.go | 84 + .../github.com/godbus/dbus/v5/export.go | 19 +- .../github.com/godbus/dbus/v5/homedir.go | 29 +- .../godbus/dbus/v5/homedir_dynamic.go | 15 - .../godbus/dbus/v5/homedir_static.go | 45 - .../github.com/godbus/dbus/v5/message.go | 16 +- .../godbus/dbus/v5/server_interfaces.go | 2 +- agent/vendor/github.com/godbus/dbus/v5/sig.go | 2 +- .../godbus/dbus/v5/transport_unix.go | 12 +- .../godbus/dbus/v5/transport_zos.go | 6 + .../github.com/godbus/dbus/v5/variant.go | 2 +- .../github.com/golang/mock/gomock/call.go | 76 +- .../github.com/golang/mock/gomock/callset.go | 7 +- .../golang/mock/gomock/controller.go | 110 +- .../github.com/golang/mock/gomock/matchers.go | 94 +- .../golang/mock/mockgen/model/model.go | 31 +- .../github.com/sirupsen/logrus/README.md | 4 +- .../github.com/sirupsen/logrus/buffer_pool.go | 9 - .../github.com/sirupsen/logrus/entry.go | 21 +- .../github.com/sirupsen/logrus/logger.go | 13 + .../testify/assert/assertion_compare.go 
| 76 +- .../assert/assertion_compare_can_convert.go | 16 + .../assert/assertion_compare_legacy.go | 16 + .../testify/assert/assertion_format.go | 22 + .../testify/assert/assertion_forward.go | 44 + .../testify/assert/assertion_order.go | 8 +- .../stretchr/testify/assert/assertions.go | 190 +- .../stretchr/testify/require/require.go | 56 + .../testify/require/require_forward.go | 44 + .../stretchr/testify/suite/suite.go | 25 +- .../vishvananda/netlink/.travis.yml | 20 - .../github.com/vishvananda/netlink/README.md | 2 +- .../vishvananda/netlink/addr_linux.go | 23 +- .../vishvananda/netlink/bpf_linux.go | 24 + .../vishvananda/netlink/class_linux.go | 18 +- .../vishvananda/netlink/conntrack_linux.go | 143 +- .../vishvananda/netlink/devlink_linux.go | 466 +- .../github.com/vishvananda/netlink/filter.go | 95 +- .../vishvananda/netlink/filter_linux.go | 332 +- .../vishvananda/netlink/handle_linux.go | 45 +- .../vishvananda/netlink/handle_unspecified.go | 6 + .../vishvananda/netlink/inet_diag.go | 1 + .../vishvananda/netlink/ipset_linux.go | 215 +- .../github.com/vishvananda/netlink/link.go | 269 +- .../vishvananda/netlink/link_linux.go | 321 +- .../github.com/vishvananda/netlink/neigh.go | 1 + .../vishvananda/netlink/neigh_linux.go | 34 +- .../netlink/netlink_unspecified.go | 16 + .../vishvananda/netlink/netns_linux.go | 6 +- .../vishvananda/netlink/nl/conntrack_linux.go | 1 + .../vishvananda/netlink/nl/devlink_linux.go | 66 +- .../vishvananda/netlink/nl/link_linux.go | 45 + .../vishvananda/netlink/nl/lwt_linux.go | 29 + .../vishvananda/netlink/nl/nl_linux.go | 128 +- .../nl/{parse_attr.go => parse_attr_linux.go} | 14 +- .../vishvananda/netlink/nl/rdma_link_linux.go | 4 + .../vishvananda/netlink/nl/seg6_linux.go | 4 +- .../vishvananda/netlink/nl/syscall.go | 2 +- .../vishvananda/netlink/nl/tc_linux.go | 136 + .../vishvananda/netlink/proc_event_linux.go | 217 + .../github.com/vishvananda/netlink/qdisc.go | 16 +- .../vishvananda/netlink/qdisc_linux.go | 25 +- .../vishvananda/netlink/rdma_link_linux.go | 51 + .../github.com/vishvananda/netlink/route.go | 36 +- .../vishvananda/netlink/route_linux.go | 457 +- .../vishvananda/netlink/route_unspecified.go | 10 + .../github.com/vishvananda/netlink/rule.go | 14 +- .../vishvananda/netlink/rule_linux.go | 13 +- .../vishvananda/netlink/socket_linux.go | 105 +- .../github.com/vishvananda/netlink/tcp.go | 66 + .../vishvananda/netlink/tcp_linux.go | 76 +- .../vishvananda/netlink/xfrm_policy.go | 13 +- .../vishvananda/netlink/xfrm_policy_linux.go | 14 +- .../vishvananda/netlink/xfrm_state.go | 4 +- .../vishvananda/netlink/xfrm_state_linux.go | 37 +- .../vishvananda/netns/.golangci.yml | 2 + .../github.com/vishvananda/netns/README.md | 1 + .../github.com/vishvananda/netns/doc.go | 9 + .../vishvananda/netns/netns_linux.go | 117 +- .../{netns_unspecified.go => netns_others.go} | 17 + .../netns/{netns.go => nshandle_linux.go} | 12 +- .../vishvananda/netns/nshandle_others.go | 45 + agent/vendor/go.etcd.io/bbolt/.gitignore | 2 + agent/vendor/go.etcd.io/bbolt/.travis.yml | 3 +- agent/vendor/go.etcd.io/bbolt/Makefile | 2 - agent/vendor/go.etcd.io/bbolt/README.md | 5 +- agent/vendor/go.etcd.io/bbolt/bolt_unix.go | 17 +- agent/vendor/go.etcd.io/bbolt/compact.go | 114 + agent/vendor/go.etcd.io/bbolt/db.go | 66 +- .../vendor/go.etcd.io/bbolt/freelist_hmap.go | 6 +- agent/vendor/go.etcd.io/bbolt/mlock_unix.go | 36 + .../vendor/go.etcd.io/bbolt/mlock_windows.go | 11 + agent/vendor/go.etcd.io/bbolt/tx.go | 3 +- .../x/tools/cmd/stringer/stringer.go | 657 ++ 
.../x/tools/go/gcexportdata/gcexportdata.go | 186 + .../x/tools/go/gcexportdata/importer.go | 75 + .../tools/go/internal/packagesdriver/sizes.go | 49 + .../golang.org/x/tools/go/packages/doc.go | 220 + .../x/tools/go/packages/external.go | 101 + .../golang.org/x/tools/go/packages/golist.go | 1183 ++++ .../x/tools/go/packages/golist_overlay.go | 575 ++ .../x/tools/go/packages/loadmode_string.go | 57 + .../x/tools/go/packages/packages.go | 1332 ++++ .../golang.org/x/tools/go/packages/visit.go | 59 + .../x/tools/go/types/objectpath/objectpath.go | 824 +++ .../x/tools/internal/gcimporter/bimport.go | 150 + .../x/tools/internal/gcimporter/exportdata.go | 99 + .../x/tools/internal/gcimporter/gcimporter.go | 274 + .../x/tools/internal/gcimporter/iexport.go | 1322 ++++ .../x/tools/internal/gcimporter/iimport.go | 1083 +++ .../internal/gcimporter/newInterface10.go | 22 + .../internal/gcimporter/newInterface11.go | 14 + .../internal/gcimporter/support_go117.go | 16 + .../internal/gcimporter/support_go118.go | 37 + .../x/tools/internal/gcimporter/unified_no.go | 10 + .../tools/internal/gcimporter/unified_yes.go | 10 + .../x/tools/internal/gcimporter/ureader_no.go | 19 + .../tools/internal/gcimporter/ureader_yes.go | 728 ++ .../internal/packagesinternal/packages.go | 30 + .../x/tools/internal/pkgbits/codes.go | 77 + .../x/tools/internal/pkgbits/decoder.go | 517 ++ .../x/tools/internal/pkgbits/doc.go | 32 + .../x/tools/internal/pkgbits/encoder.go | 383 ++ .../x/tools/internal/pkgbits/flags.go | 9 + .../x/tools/internal/pkgbits/frames_go1.go | 21 + .../x/tools/internal/pkgbits/frames_go17.go | 28 + .../x/tools/internal/pkgbits/reloc.go | 42 + .../x/tools/internal/pkgbits/support.go | 17 + .../x/tools/internal/pkgbits/sync.go | 113 + .../internal/pkgbits/syncmarker_string.go | 89 + .../internal/tokeninternal/tokeninternal.go | 151 + .../tools/internal/typesinternal/errorcode.go | 1560 +++++ .../typesinternal/errorcode_string.go | 179 + .../x/tools/internal/typesinternal/types.go | 68 + .../tools/internal/typesinternal/types_118.go | 19 + agent/vendor/modules.txt | 50 +- ecs-agent/go.mod | 18 +- ecs-agent/go.sum | 904 ++- .../netlib/model/ecscni/appmesh_config.go | 66 + .../netlib/model/ecscni/bridge_config.go | 43 + ecs-agent/netlib/model/ecscni/client.go | 86 + ecs-agent/netlib/model/ecscni/eni_config.go | 76 + .../model/ecscni/generate_mocks_linux.go | 6 + .../model/ecscni/generate_mocks_windows.go | 6 + ecs-agent/netlib/model/ecscni/ipam_config.go | 43 + ecs-agent/netlib/model/ecscni/netconfig.go | 96 + .../netlib/model/ecscni/netconfig_test.go | 24 + ecs-agent/netlib/model/ecscni/nsutil_linux.go | 141 + .../netlib/model/ecscni/nsutil_windows.go | 90 + ecs-agent/netlib/model/ecscni/plugin.go | 31 + .../model/ecscni/serviceconnect_config.go | 103 + .../netlib/model/ecscni/vpcbranch_config.go | 52 + .../netlib/model/ecscni/vpceni_config.go | 61 + .../netlib/model/ecscni/vpctunnel_config.go | 52 + .../model/serviceconnect/serviceconnect.go | 44 + ecs-agent/utils/uuid/uuid.go | 19 + ecs-agent/utils/uuid/uuid_test.go | 48 + .../github.com/Microsoft/go-winio/.gitignore | 1 + .../github.com/Microsoft/go-winio/CODEOWNERS | 1 + .../github.com/Microsoft/go-winio/LICENSE | 22 + .../github.com/Microsoft/go-winio/README.md | 37 + .../github.com/Microsoft/go-winio/backup.go | 280 + .../github.com/Microsoft/go-winio/ea.go | 137 + .../github.com/Microsoft/go-winio/file.go | 329 + .../github.com/Microsoft/go-winio/fileinfo.go | 73 + .../github.com/Microsoft/go-winio/hvsock.go | 316 + 
.../github.com/Microsoft/go-winio/pipe.go | 517 ++ .../Microsoft/go-winio/pkg/guid/guid.go | 228 + .../go-winio/pkg/guid/guid_nonwindows.go | 15 + .../go-winio/pkg/guid/guid_windows.go | 10 + .../pkg/security/grantvmgroupaccess.go | 160 + .../go-winio/pkg/security/syscall_windows.go | 7 + .../go-winio/pkg/security/zsyscall_windows.go | 70 + .../Microsoft/go-winio/privilege.go | 203 + .../github.com/Microsoft/go-winio/reparse.go | 128 + .../github.com/Microsoft/go-winio/sd.go | 98 + .../github.com/Microsoft/go-winio/syscall.go | 3 + .../github.com/Microsoft/go-winio/vhd/vhd.go | 350 + .../Microsoft/go-winio/vhd/zvhd_windows.go | 106 + .../Microsoft/go-winio/zsyscall_windows.go | 427 ++ .../github.com/Microsoft/hcsshim/LICENSE | 21 + .../hcsshim/computestorage/attach.go | 38 + .../hcsshim/computestorage/destroy.go | 26 + .../hcsshim/computestorage/detach.go | 26 + .../hcsshim/computestorage/export.go | 46 + .../hcsshim/computestorage/format.go | 26 + .../hcsshim/computestorage/helpers.go | 193 + .../hcsshim/computestorage/import.go | 41 + .../hcsshim/computestorage/initialize.go | 38 + .../Microsoft/hcsshim/computestorage/mount.go | 27 + .../Microsoft/hcsshim/computestorage/setup.go | 74 + .../hcsshim/computestorage/storage.go | 50 + .../computestorage/zsyscall_windows.go | 319 + .../github.com/Microsoft/hcsshim/hcn/hcn.go | 328 + .../Microsoft/hcsshim/hcn/hcnendpoint.go | 388 ++ .../Microsoft/hcsshim/hcn/hcnerrors.go | 164 + .../Microsoft/hcsshim/hcn/hcnglobals.go | 138 + .../Microsoft/hcsshim/hcn/hcnloadbalancer.go | 311 + .../Microsoft/hcsshim/hcn/hcnnamespace.go | 446 ++ .../Microsoft/hcsshim/hcn/hcnnetwork.go | 462 ++ .../Microsoft/hcsshim/hcn/hcnpolicy.go | 344 + .../Microsoft/hcsshim/hcn/hcnroute.go | 266 + .../Microsoft/hcsshim/hcn/hcnsupport.go | 147 + .../Microsoft/hcsshim/hcn/zsyscall_windows.go | 795 +++ .../hcsshim/internal/cni/registry.go | 110 + .../Microsoft/hcsshim/internal/cow/cow.go | 97 + .../hcsshim/internal/hcs/callback.go | 161 + .../Microsoft/hcsshim/internal/hcs/errors.go | 343 + .../Microsoft/hcsshim/internal/hcs/process.go | 589 ++ .../hcsshim/internal/hcs/schema1/schema1.go | 250 + .../internal/hcs/schema2/attachment.go | 36 + .../hcsshim/internal/hcs/schema2/battery.go | 13 + .../hcs/schema2/cache_query_stats_response.go | 18 + .../hcsshim/internal/hcs/schema2/chipset.go | 27 + .../internal/hcs/schema2/close_handle.go | 14 + .../hcsshim/internal/hcs/schema2/com_port.go | 17 + .../internal/hcs/schema2/compute_system.go | 26 + .../internal/hcs/schema2/configuration.go | 72 + .../internal/hcs/schema2/console_size.go | 16 + .../hcsshim/internal/hcs/schema2/container.go | 36 + ...r_credential_guard_add_instance_request.go | 16 + ...edential_guard_hv_socket_service_config.go | 15 + .../container_credential_guard_instance.go | 16 + ...ainer_credential_guard_modify_operation.go | 17 + ...iner_credential_guard_operation_request.go | 15 + ...redential_guard_remove_instance_request.go | 14 + .../container_credential_guard_state.go | 25 + .../container_credential_guard_system_info.go | 14 + .../schema2/container_memory_information.go | 25 + .../hcsshim/internal/hcs/schema2/cpu_group.go | 15 + .../hcs/schema2/cpu_group_affinity.go | 15 + .../internal/hcs/schema2/cpu_group_config.go | 18 + .../hcs/schema2/cpu_group_configurations.go | 15 + .../hcs/schema2/cpu_group_operations.go | 18 + .../hcs/schema2/cpu_group_property.go | 15 + .../hcs/schema2/create_group_operation.go | 17 + .../hcs/schema2/delete_group_operation.go | 15 + .../hcsshim/internal/hcs/schema2/device.go | 27 + 
.../hcsshim/internal/hcs/schema2/devices.go | 46 + .../hcs/schema2/enhanced_mode_video.go | 14 + .../hcs/schema2/flexible_io_device.go | 18 + .../internal/hcs/schema2/guest_connection.go | 19 + .../hcs/schema2/guest_connection_info.go | 21 + .../hcs/schema2/guest_crash_reporting.go | 14 + .../hcsshim/internal/hcs/schema2/guest_os.go | 14 + .../internal/hcs/schema2/guest_state.go | 22 + .../schema2/host_processor_modify_request.go | 16 + .../internal/hcs/schema2/hosted_system.go | 16 + .../hcsshim/internal/hcs/schema2/hv_socket.go | 16 + .../internal/hcs/schema2/hv_socket_2.go | 15 + .../internal/hcs/schema2/hv_socket_address.go | 17 + .../hcs/schema2/hv_socket_service_config.go | 28 + .../hcs/schema2/hv_socket_system_config.go | 22 + .../hcs/schema2/interrupt_moderation_mode.go | 42 + .../internal/hcs/schema2/iov_settings.go | 22 + .../hcsshim/internal/hcs/schema2/keyboard.go | 13 + .../hcsshim/internal/hcs/schema2/layer.go | 21 + .../hcs/schema2/linux_kernel_direct.go | 18 + .../internal/hcs/schema2/logical_processor.go | 18 + .../internal/hcs/schema2/mapped_directory.go | 20 + .../internal/hcs/schema2/mapped_pipe.go | 18 + .../hcsshim/internal/hcs/schema2/memory.go | 14 + .../hcsshim/internal/hcs/schema2/memory_2.go | 49 + .../hcs/schema2/memory_information_for_vm.go | 18 + .../internal/hcs/schema2/memory_stats.go | 19 + .../model_container_definition_device.go | 14 + .../hcs/schema2/model_device_category.go | 15 + .../hcs/schema2/model_device_extension.go | 15 + .../hcs/schema2/model_device_instance.go | 17 + .../hcs/schema2/model_device_namespace.go | 16 + .../hcs/schema2/model_interface_class.go | 16 + .../internal/hcs/schema2/model_namespace.go | 15 + .../hcs/schema2/model_object_directory.go | 18 + .../hcs/schema2/model_object_namespace.go | 16 + .../hcs/schema2/model_object_symlink.go | 18 + .../hcs/schema2/modification_request.go | 15 + .../hcs/schema2/modify_setting_request.go | 20 + .../hcsshim/internal/hcs/schema2/mouse.go | 13 + .../internal/hcs/schema2/network_adapter.go | 17 + .../internal/hcs/schema2/networking.go | 23 + .../hcs/schema2/pause_notification.go | 15 + .../internal/hcs/schema2/pause_options.go | 17 + .../hcsshim/internal/hcs/schema2/plan9.go | 14 + .../internal/hcs/schema2/plan9_share.go | 34 + .../internal/hcs/schema2/process_details.go | 33 + .../hcs/schema2/process_modify_request.go | 19 + .../hcs/schema2/process_parameters.go | 46 + .../internal/hcs/schema2/process_status.go | 21 + .../hcsshim/internal/hcs/schema2/processor.go | 18 + .../internal/hcs/schema2/processor_2.go | 23 + .../internal/hcs/schema2/processor_stats.go | 19 + .../hcs/schema2/processor_topology.go | 15 + .../internal/hcs/schema2/properties.go | 54 + .../internal/hcs/schema2/property_query.go | 15 + .../internal/hcs/schema2/property_type.go | 26 + .../hcs/schema2/rdp_connection_options.go | 16 + .../internal/hcs/schema2/registry_changes.go | 16 + .../internal/hcs/schema2/registry_key.go | 18 + .../internal/hcs/schema2/registry_value.go | 30 + .../internal/hcs/schema2/restore_state.go | 19 + .../internal/hcs/schema2/save_options.go | 19 + .../hcsshim/internal/hcs/schema2/scsi.go | 16 + .../hcs/schema2/service_properties.go | 18 + .../schema2/shared_memory_configuration.go | 14 + .../hcs/schema2/shared_memory_region.go | 22 + .../hcs/schema2/shared_memory_region_info.go | 16 + .../internal/hcs/schema2/silo_properties.go | 17 + .../internal/hcs/schema2/statistics.go | 29 + .../hcsshim/internal/hcs/schema2/storage.go | 21 + .../internal/hcs/schema2/storage_qo_s.go | 16 + 
.../internal/hcs/schema2/storage_stats.go | 21 + .../hcsshim/internal/hcs/schema2/topology.go | 16 + .../hcsshim/internal/hcs/schema2/uefi.go | 20 + .../internal/hcs/schema2/uefi_boot_entry.go | 22 + .../hcsshim/internal/hcs/schema2/version.go | 16 + .../internal/hcs/schema2/video_monitor.go | 18 + .../internal/hcs/schema2/virtual_machine.go | 32 + .../internal/hcs/schema2/virtual_node_info.go | 20 + .../hcs/schema2/virtual_p_mem_controller.go | 20 + .../hcs/schema2/virtual_p_mem_device.go | 18 + .../hcs/schema2/virtual_p_mem_mapping.go | 15 + .../hcs/schema2/virtual_pci_device.go | 16 + .../hcs/schema2/virtual_pci_function.go | 18 + .../internal/hcs/schema2/virtual_smb.go | 16 + .../internal/hcs/schema2/virtual_smb_share.go | 20 + .../hcs/schema2/virtual_smb_share_options.go | 62 + .../hcsshim/internal/hcs/schema2/vm_memory.go | 26 + .../hcs/schema2/vm_processor_limits.go | 22 + .../hcs/schema2/windows_crash_reporting.go | 16 + .../Microsoft/hcsshim/internal/hcs/service.go | 49 + .../Microsoft/hcsshim/internal/hcs/system.go | 815 +++ .../Microsoft/hcsshim/internal/hcs/utils.go | 62 + .../hcsshim/internal/hcs/waithelper.go | 68 + .../hcsshim/internal/hcserror/hcserror.go | 47 + .../hcsshim/internal/interop/interop.go | 23 + .../internal/interop/zsyscall_windows.go | 48 + .../hcsshim/internal/jobobject/iocp.go | 111 + .../hcsshim/internal/jobobject/jobobject.go | 538 ++ .../hcsshim/internal/jobobject/limits.go | 315 + .../Microsoft/hcsshim/internal/log/g.go | 23 + .../hcsshim/internal/logfields/fields.go | 32 + .../Microsoft/hcsshim/internal/oc/exporter.go | 43 + .../Microsoft/hcsshim/internal/oc/span.go | 17 + .../Microsoft/hcsshim/internal/queue/mq.go | 92 + .../hcsshim/internal/regstate/regstate.go | 288 + .../internal/regstate/zsyscall_windows.go | 51 + .../hcsshim/internal/runhcs/container.go | 71 + .../Microsoft/hcsshim/internal/runhcs/util.go | 16 + .../Microsoft/hcsshim/internal/runhcs/vm.go | 43 + .../hcsshim/internal/timeout/timeout.go | 74 + .../hcsshim/internal/vmcompute/vmcompute.go | 610 ++ .../internal/vmcompute/zsyscall_windows.go | 581 ++ .../hcsshim/internal/winapi/console.go | 44 + .../hcsshim/internal/winapi/devices.go | 13 + .../hcsshim/internal/winapi/errors.go | 15 + .../hcsshim/internal/winapi/filesystem.go | 110 + .../hcsshim/internal/winapi/jobobject.go | 218 + .../hcsshim/internal/winapi/logon.go | 30 + .../hcsshim/internal/winapi/memory.go | 4 + .../Microsoft/hcsshim/internal/winapi/net.go | 3 + .../Microsoft/hcsshim/internal/winapi/path.go | 11 + .../hcsshim/internal/winapi/process.go | 65 + .../hcsshim/internal/winapi/processor.go | 7 + .../hcsshim/internal/winapi/system.go | 53 + .../hcsshim/internal/winapi/thread.go | 12 + .../hcsshim/internal/winapi/utils.go | 80 + .../hcsshim/internal/winapi/winapi.go | 5 + .../internal/winapi/zsyscall_windows.go | 354 + .../hcsshim/osversion/osversion_windows.go | 50 + .../hcsshim/osversion/windowsbuilds.go | 50 + .../github.com/containerd/cgroups/LICENSE | 201 + .../containerd/cgroups/stats/v1/doc.go | 17 + .../containerd/cgroups/stats/v1/metrics.pb.go | 6125 +++++++++++++++++ .../cgroups/stats/v1/metrics.pb.txt | 790 +++ .../containerd/cgroups/stats/v1/metrics.proto | 158 + .../containernetworking/cni/LICENSE | 202 + .../containernetworking/cni/libcni/api.go | 679 ++ .../containernetworking/cni/libcni/conf.go | 270 + .../cni/pkg/invoke/args.go | 128 + .../cni/pkg/invoke/delegate.go | 80 + .../cni/pkg/invoke/exec.go | 187 + .../cni/pkg/invoke/find.go | 48 + .../cni/pkg/invoke/os_unix.go | 20 + 
.../cni/pkg/invoke/os_windows.go | 18 + .../cni/pkg/invoke/raw_exec.go | 88 + .../cni/pkg/types/020/types.go | 189 + .../cni/pkg/types/040/types.go | 306 + .../cni/pkg/types/100/types.go | 307 + .../containernetworking/cni/pkg/types/args.go | 122 + .../cni/pkg/types/create/create.go | 56 + .../cni/pkg/types/internal/convert.go | 92 + .../cni/pkg/types/internal/create.go | 66 + .../cni/pkg/types/types.go | 234 + .../cni/pkg/utils/utils.go | 84 + .../cni/pkg/version/conf.go | 26 + .../cni/pkg/version/plugin.go | 144 + .../cni/pkg/version/reconcile.go | 49 + .../cni/pkg/version/version.go | 89 + .../containernetworking/plugins/LICENSE | 201 + .../plugins/pkg/ns/README.md | 41 + .../plugins/pkg/ns/ns_linux.go | 234 + .../gogo/protobuf/gogoproto/Makefile | 37 + .../github.com/gogo/protobuf/gogoproto/doc.go | 169 + .../gogo/protobuf/gogoproto/gogo.pb.go | 874 +++ .../gogo/protobuf/gogoproto/gogo.pb.golden | 45 + .../gogo/protobuf/gogoproto/gogo.proto | 144 + .../gogo/protobuf/gogoproto/helper.go | 415 ++ .../protoc-gen-gogo/descriptor/Makefile | 36 + .../protoc-gen-gogo/descriptor/descriptor.go | 118 + .../descriptor/descriptor.pb.go | 2865 ++++++++ .../descriptor/descriptor_gostring.gen.go | 752 ++ .../protoc-gen-gogo/descriptor/helper.go | 390 ++ .../github.com/golang/groupcache/LICENSE | 191 + .../github.com/golang/groupcache/lru/lru.go | 133 + .../github.com/golang/mock/gomock/call.go | 76 +- .../github.com/golang/mock/gomock/callset.go | 7 +- .../golang/mock/gomock/controller.go | 110 +- .../github.com/golang/mock/gomock/matchers.go | 94 +- .../golang/mock/mockgen/model/model.go | 31 +- .../github.com/sirupsen/logrus/.gitignore | 4 + .../github.com/sirupsen/logrus/.golangci.yml | 40 + .../github.com/sirupsen/logrus/.travis.yml | 15 + .../github.com/sirupsen/logrus/CHANGELOG.md | 259 + .../vendor/github.com/sirupsen/logrus/LICENSE | 21 + .../github.com/sirupsen/logrus/README.md | 513 ++ .../github.com/sirupsen/logrus/alt_exit.go | 76 + .../github.com/sirupsen/logrus/appveyor.yml | 14 + .../github.com/sirupsen/logrus/buffer_pool.go | 43 + .../vendor/github.com/sirupsen/logrus/doc.go | 26 + .../github.com/sirupsen/logrus/entry.go | 442 ++ .../github.com/sirupsen/logrus/exported.go | 270 + .../github.com/sirupsen/logrus/formatter.go | 78 + .../github.com/sirupsen/logrus/hooks.go | 34 + .../sirupsen/logrus/json_formatter.go | 128 + .../github.com/sirupsen/logrus/logger.go | 417 ++ .../github.com/sirupsen/logrus/logrus.go | 186 + .../logrus/terminal_check_appengine.go | 11 + .../sirupsen/logrus/terminal_check_bsd.go | 13 + .../sirupsen/logrus/terminal_check_js.go | 7 + .../logrus/terminal_check_no_terminal.go | 11 + .../logrus/terminal_check_notappengine.go | 17 + .../sirupsen/logrus/terminal_check_solaris.go | 11 + .../sirupsen/logrus/terminal_check_unix.go | 13 + .../sirupsen/logrus/terminal_check_windows.go | 27 + .../sirupsen/logrus/text_formatter.go | 339 + .../github.com/sirupsen/logrus/writer.go | 70 + .../testify/assert/assertion_compare.go | 76 +- .../assert/assertion_compare_can_convert.go | 16 + .../assert/assertion_compare_legacy.go | 16 + .../testify/assert/assertion_format.go | 22 + .../testify/assert/assertion_forward.go | 44 + .../testify/assert/assertion_order.go | 8 +- .../stretchr/testify/assert/assertions.go | 190 +- .../stretchr/testify/require/require.go | 56 + .../testify/require/require_forward.go | 44 + ecs-agent/vendor/go.opencensus.io/.gitignore | 9 + ecs-agent/vendor/go.opencensus.io/AUTHORS | 1 + .../vendor/go.opencensus.io/CONTRIBUTING.md | 63 + 
ecs-agent/vendor/go.opencensus.io/LICENSE | 202 + ecs-agent/vendor/go.opencensus.io/Makefile | 97 + ecs-agent/vendor/go.opencensus.io/README.md | 267 + .../vendor/go.opencensus.io/appveyor.yml | 24 + .../go.opencensus.io/internal/internal.go | 37 + .../go.opencensus.io/internal/sanitize.go | 50 + .../internal/traceinternals.go | 53 + .../vendor/go.opencensus.io/opencensus.go | 21 + .../go.opencensus.io/trace/basetypes.go | 129 + .../vendor/go.opencensus.io/trace/config.go | 86 + .../vendor/go.opencensus.io/trace/doc.go | 52 + .../go.opencensus.io/trace/evictedqueue.go | 38 + .../vendor/go.opencensus.io/trace/export.go | 97 + .../trace/internal/internal.go | 22 + .../vendor/go.opencensus.io/trace/lrumap.go | 61 + .../vendor/go.opencensus.io/trace/sampling.go | 75 + .../go.opencensus.io/trace/spanbucket.go | 130 + .../go.opencensus.io/trace/spanstore.go | 308 + .../go.opencensus.io/trace/status_codes.go | 37 + .../vendor/go.opencensus.io/trace/trace.go | 595 ++ .../go.opencensus.io/trace/trace_api.go | 265 + .../go.opencensus.io/trace/trace_go11.go | 33 + .../go.opencensus.io/trace/trace_nongo11.go | 26 + .../trace/tracestate/tracestate.go | 147 + .../golang.org/x/sys/unix/ioctl_signed.go | 70 + .../sys/unix/{ioctl.go => ioctl_unsigned.go} | 4 +- .../vendor/golang.org/x/sys/unix/ioctl_zos.go | 12 +- .../vendor/golang.org/x/sys/unix/mkerrors.sh | 2 + .../golang.org/x/sys/unix/syscall_aix.go | 4 +- .../golang.org/x/sys/unix/syscall_aix_ppc.go | 1 - .../x/sys/unix/syscall_aix_ppc64.go | 1 - .../golang.org/x/sys/unix/syscall_darwin.go | 3 +- .../x/sys/unix/syscall_dragonfly.go | 1 - .../golang.org/x/sys/unix/syscall_freebsd.go | 1 - .../golang.org/x/sys/unix/syscall_linux.go | 10 +- .../x/sys/unix/syscall_linux_386.go | 27 - .../x/sys/unix/syscall_linux_amd64.go | 1 - .../x/sys/unix/syscall_linux_arm.go | 27 - .../x/sys/unix/syscall_linux_arm64.go | 10 - .../x/sys/unix/syscall_linux_loong64.go | 5 - .../x/sys/unix/syscall_linux_mips64x.go | 1 - .../x/sys/unix/syscall_linux_mipsx.go | 27 - .../x/sys/unix/syscall_linux_ppc.go | 27 - .../x/sys/unix/syscall_linux_ppc64x.go | 1 - .../x/sys/unix/syscall_linux_riscv64.go | 1 - .../x/sys/unix/syscall_linux_s390x.go | 1 - .../x/sys/unix/syscall_linux_sparc64.go | 1 - .../golang.org/x/sys/unix/syscall_netbsd.go | 2 - .../golang.org/x/sys/unix/syscall_openbsd.go | 1 - .../golang.org/x/sys/unix/syscall_solaris.go | 21 +- .../golang.org/x/sys/unix/syscall_unix.go | 7 + .../x/sys/unix/syscall_zos_s390x.go | 4 +- .../x/sys/unix/zerrors_darwin_amd64.go | 19 + .../x/sys/unix/zerrors_darwin_arm64.go | 19 + .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 15 +- .../x/sys/unix/zsyscall_aix_ppc64.go | 18 +- .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 10 - .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 10 +- .../x/sys/unix/zsyscall_darwin_amd64.go | 39 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 11 +- .../x/sys/unix/zsyscall_darwin_arm64.go | 39 +- .../x/sys/unix/zsyscall_darwin_arm64.s | 11 +- .../x/sys/unix/zsyscall_dragonfly_amd64.go | 10 - .../x/sys/unix/zsyscall_freebsd_386.go | 10 - .../x/sys/unix/zsyscall_freebsd_amd64.go | 10 - .../x/sys/unix/zsyscall_freebsd_arm.go | 10 - .../x/sys/unix/zsyscall_freebsd_arm64.go | 10 - .../x/sys/unix/zsyscall_freebsd_riscv64.go | 10 - .../golang.org/x/sys/unix/zsyscall_linux.go | 10 - .../x/sys/unix/zsyscall_linux_386.go | 10 - .../x/sys/unix/zsyscall_linux_amd64.go | 10 - .../x/sys/unix/zsyscall_linux_arm.go | 10 - .../x/sys/unix/zsyscall_linux_arm64.go | 10 - .../x/sys/unix/zsyscall_linux_mips.go | 10 - 
.../x/sys/unix/zsyscall_linux_mips64.go | 10 - .../x/sys/unix/zsyscall_linux_mips64le.go | 10 - .../x/sys/unix/zsyscall_linux_mipsle.go | 10 - .../x/sys/unix/zsyscall_linux_ppc.go | 10 - .../x/sys/unix/zsyscall_linux_ppc64.go | 10 - .../x/sys/unix/zsyscall_linux_ppc64le.go | 10 - .../x/sys/unix/zsyscall_linux_riscv64.go | 10 - .../x/sys/unix/zsyscall_linux_s390x.go | 10 - .../x/sys/unix/zsyscall_linux_sparc64.go | 10 - .../x/sys/unix/zsyscall_netbsd_386.go | 10 - .../x/sys/unix/zsyscall_netbsd_amd64.go | 10 - .../x/sys/unix/zsyscall_netbsd_arm.go | 10 - .../x/sys/unix/zsyscall_netbsd_arm64.go | 10 - .../x/sys/unix/zsyscall_openbsd_386.go | 14 - .../x/sys/unix/zsyscall_openbsd_386.s | 5 - .../x/sys/unix/zsyscall_openbsd_amd64.go | 14 - .../x/sys/unix/zsyscall_openbsd_amd64.s | 5 - .../x/sys/unix/zsyscall_openbsd_arm.go | 14 - .../x/sys/unix/zsyscall_openbsd_arm.s | 5 - .../x/sys/unix/zsyscall_openbsd_arm64.go | 14 - .../x/sys/unix/zsyscall_openbsd_arm64.s | 5 - .../x/sys/unix/zsyscall_openbsd_mips64.go | 14 - .../x/sys/unix/zsyscall_openbsd_mips64.s | 5 - .../x/sys/unix/zsyscall_openbsd_ppc64.go | 14 - .../x/sys/unix/zsyscall_openbsd_ppc64.s | 6 - .../x/sys/unix/zsyscall_openbsd_riscv64.go | 14 - .../x/sys/unix/zsyscall_openbsd_riscv64.s | 5 - .../x/sys/unix/zsyscall_solaris_amd64.go | 17 +- .../x/sys/unix/zsyscall_zos_s390x.go | 4 +- .../x/sys/unix/ztypes_darwin_amd64.go | 11 + .../x/sys/unix/ztypes_darwin_arm64.go | 11 + .../golang.org/x/sys/windows/types_windows.go | 4 +- .../x/tools/internal/typeparams/common.go | 1 - ecs-agent/vendor/modules.txt | 69 +- 686 files changed, 72665 insertions(+), 6949 deletions(-) create mode 100644 agent/vendor/github.com/Microsoft/go-winio/.gitattributes create mode 100644 agent/vendor/github.com/Microsoft/go-winio/.golangci.yml create mode 100644 agent/vendor/github.com/Microsoft/go-winio/SECURITY.md create mode 100644 agent/vendor/github.com/Microsoft/go-winio/doc.go create mode 100644 agent/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go create mode 100644 agent/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go create mode 100644 agent/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go create mode 100644 agent/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go create mode 100644 agent/vendor/github.com/Microsoft/go-winio/tools.go create mode 100644 agent/vendor/github.com/cilium/ebpf/MAINTAINERS.md create mode 100644 agent/vendor/github.com/cilium/ebpf/asm/metadata.go create mode 100644 agent/vendor/github.com/cilium/ebpf/attachtype_string.go create mode 100644 agent/vendor/github.com/cilium/ebpf/btf/btf.go rename agent/vendor/github.com/cilium/ebpf/{internal => }/btf/btf_types.go (71%) rename agent/vendor/github.com/cilium/ebpf/{internal => }/btf/btf_types_string.go (100%) rename agent/vendor/github.com/cilium/ebpf/{internal => }/btf/core.go (64%) rename agent/vendor/github.com/cilium/ebpf/{internal => }/btf/doc.go (71%) create mode 100644 agent/vendor/github.com/cilium/ebpf/btf/ext_info.go create mode 100644 agent/vendor/github.com/cilium/ebpf/btf/format.go create mode 100644 agent/vendor/github.com/cilium/ebpf/btf/handle.go create mode 100644 agent/vendor/github.com/cilium/ebpf/btf/strings.go create mode 100644 agent/vendor/github.com/cilium/ebpf/btf/types.go delete mode 100644 agent/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go create mode 100644 agent/vendor/github.com/cilium/ebpf/internal/align.go delete mode 100644 agent/vendor/github.com/cilium/ebpf/internal/btf/btf.go delete 
mode 100644 agent/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go delete mode 100644 agent/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go delete mode 100644 agent/vendor/github.com/cilium/ebpf/internal/btf/strings.go delete mode 100644 agent/vendor/github.com/cilium/ebpf/internal/btf/types.go delete mode 100644 agent/vendor/github.com/cilium/ebpf/internal/endian.go create mode 100644 agent/vendor/github.com/cilium/ebpf/internal/endian_be.go create mode 100644 agent/vendor/github.com/cilium/ebpf/internal/endian_le.go delete mode 100644 agent/vendor/github.com/cilium/ebpf/internal/fd.go create mode 100644 agent/vendor/github.com/cilium/ebpf/internal/output.go delete mode 100644 agent/vendor/github.com/cilium/ebpf/internal/ptr_64.go create mode 100644 agent/vendor/github.com/cilium/ebpf/internal/sys/doc.go create mode 100644 agent/vendor/github.com/cilium/ebpf/internal/sys/fd.go rename agent/vendor/github.com/cilium/ebpf/internal/{ => sys}/ptr.go (71%) rename agent/vendor/github.com/cilium/ebpf/internal/{ => sys}/ptr_32_be.go (80%) rename agent/vendor/github.com/cilium/ebpf/internal/{ => sys}/ptr_32_le.go (75%) create mode 100644 agent/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go create mode 100644 agent/vendor/github.com/cilium/ebpf/internal/sys/syscall.go create mode 100644 agent/vendor/github.com/cilium/ebpf/internal/sys/types.go delete mode 100644 agent/vendor/github.com/cilium/ebpf/internal/syscall.go delete mode 100644 agent/vendor/github.com/cilium/ebpf/internal/syscall_string.go create mode 100644 agent/vendor/github.com/cilium/ebpf/internal/vdso.go create mode 100644 agent/vendor/github.com/cilium/ebpf/link/socket_filter.go create mode 100644 agent/vendor/github.com/cilium/ebpf/link/tracing.go create mode 100644 agent/vendor/github.com/cilium/ebpf/link/xdp.go create mode 100644 agent/vendor/github.com/godbus/dbus/v5/escape.go delete mode 100644 agent/vendor/github.com/godbus/dbus/v5/homedir_dynamic.go delete mode 100644 agent/vendor/github.com/godbus/dbus/v5/homedir_static.go create mode 100644 agent/vendor/github.com/godbus/dbus/v5/transport_zos.go create mode 100644 agent/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go create mode 100644 agent/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go delete mode 100644 agent/vendor/github.com/vishvananda/netlink/.travis.yml create mode 100644 agent/vendor/github.com/vishvananda/netlink/nl/lwt_linux.go rename agent/vendor/github.com/vishvananda/netlink/nl/{parse_attr.go => parse_attr_linux.go} (79%) create mode 100644 agent/vendor/github.com/vishvananda/netlink/proc_event_linux.go create mode 100644 agent/vendor/github.com/vishvananda/netns/.golangci.yml create mode 100644 agent/vendor/github.com/vishvananda/netns/doc.go rename agent/vendor/github.com/vishvananda/netns/{netns_unspecified.go => netns_others.go} (63%) rename agent/vendor/github.com/vishvananda/netns/{netns.go => nshandle_linux.go} (75%) create mode 100644 agent/vendor/github.com/vishvananda/netns/nshandle_others.go create mode 100644 agent/vendor/go.etcd.io/bbolt/compact.go create mode 100644 agent/vendor/go.etcd.io/bbolt/mlock_unix.go create mode 100644 agent/vendor/go.etcd.io/bbolt/mlock_windows.go create mode 100644 agent/vendor/golang.org/x/tools/cmd/stringer/stringer.go create mode 100644 agent/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go create mode 100644 agent/vendor/golang.org/x/tools/go/gcexportdata/importer.go create mode 100644 
agent/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go create mode 100644 agent/vendor/golang.org/x/tools/go/packages/doc.go create mode 100644 agent/vendor/golang.org/x/tools/go/packages/external.go create mode 100644 agent/vendor/golang.org/x/tools/go/packages/golist.go create mode 100644 agent/vendor/golang.org/x/tools/go/packages/golist_overlay.go create mode 100644 agent/vendor/golang.org/x/tools/go/packages/loadmode_string.go create mode 100644 agent/vendor/golang.org/x/tools/go/packages/packages.go create mode 100644 agent/vendor/golang.org/x/tools/go/packages/visit.go create mode 100644 agent/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go create mode 100644 agent/vendor/golang.org/x/tools/internal/gcimporter/bimport.go create mode 100644 agent/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go create mode 100644 agent/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go create mode 100644 agent/vendor/golang.org/x/tools/internal/gcimporter/iexport.go create mode 100644 agent/vendor/golang.org/x/tools/internal/gcimporter/iimport.go create mode 100644 agent/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go create mode 100644 agent/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go create mode 100644 agent/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go create mode 100644 agent/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go create mode 100644 agent/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go create mode 100644 agent/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go create mode 100644 agent/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go create mode 100644 agent/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go create mode 100644 agent/vendor/golang.org/x/tools/internal/packagesinternal/packages.go create mode 100644 agent/vendor/golang.org/x/tools/internal/pkgbits/codes.go create mode 100644 agent/vendor/golang.org/x/tools/internal/pkgbits/decoder.go create mode 100644 agent/vendor/golang.org/x/tools/internal/pkgbits/doc.go create mode 100644 agent/vendor/golang.org/x/tools/internal/pkgbits/encoder.go create mode 100644 agent/vendor/golang.org/x/tools/internal/pkgbits/flags.go create mode 100644 agent/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go create mode 100644 agent/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go create mode 100644 agent/vendor/golang.org/x/tools/internal/pkgbits/reloc.go create mode 100644 agent/vendor/golang.org/x/tools/internal/pkgbits/support.go create mode 100644 agent/vendor/golang.org/x/tools/internal/pkgbits/sync.go create mode 100644 agent/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go create mode 100644 agent/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go create mode 100644 agent/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go create mode 100644 agent/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go create mode 100644 agent/vendor/golang.org/x/tools/internal/typesinternal/types.go create mode 100644 agent/vendor/golang.org/x/tools/internal/typesinternal/types_118.go create mode 100644 ecs-agent/netlib/model/ecscni/appmesh_config.go create mode 100644 ecs-agent/netlib/model/ecscni/bridge_config.go create mode 100644 ecs-agent/netlib/model/ecscni/client.go create mode 100644 ecs-agent/netlib/model/ecscni/eni_config.go create mode 100644 ecs-agent/netlib/model/ecscni/generate_mocks_linux.go create mode 100644 
ecs-agent/netlib/model/ecscni/generate_mocks_windows.go create mode 100644 ecs-agent/netlib/model/ecscni/ipam_config.go create mode 100644 ecs-agent/netlib/model/ecscni/netconfig.go create mode 100644 ecs-agent/netlib/model/ecscni/netconfig_test.go create mode 100644 ecs-agent/netlib/model/ecscni/nsutil_linux.go create mode 100644 ecs-agent/netlib/model/ecscni/nsutil_windows.go create mode 100644 ecs-agent/netlib/model/ecscni/plugin.go create mode 100644 ecs-agent/netlib/model/ecscni/serviceconnect_config.go create mode 100644 ecs-agent/netlib/model/ecscni/vpcbranch_config.go create mode 100644 ecs-agent/netlib/model/ecscni/vpceni_config.go create mode 100644 ecs-agent/netlib/model/ecscni/vpctunnel_config.go create mode 100644 ecs-agent/netlib/model/serviceconnect/serviceconnect.go create mode 100644 ecs-agent/utils/uuid/uuid.go create mode 100644 ecs-agent/utils/uuid/uuid_test.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/go-winio/.gitignore create mode 100644 ecs-agent/vendor/github.com/Microsoft/go-winio/CODEOWNERS create mode 100644 ecs-agent/vendor/github.com/Microsoft/go-winio/LICENSE create mode 100644 ecs-agent/vendor/github.com/Microsoft/go-winio/README.md create mode 100644 ecs-agent/vendor/github.com/Microsoft/go-winio/backup.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/go-winio/ea.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/go-winio/file.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/go-winio/fileinfo.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/go-winio/hvsock.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/go-winio/pipe.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/go-winio/privilege.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/go-winio/reparse.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/go-winio/sd.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/go-winio/syscall.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/go-winio/vhd/vhd.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/LICENSE create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/export.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/format.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/import.go create mode 100644 
ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go create mode 100644 
ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go create mode 100644 
ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go create mode 100644 
ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go create 
mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/log/g.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/runhcs/util.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go create mode 100644 
ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go create mode 100644 ecs-agent/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go create mode 100644 ecs-agent/vendor/github.com/containerd/cgroups/LICENSE create mode 100644 ecs-agent/vendor/github.com/containerd/cgroups/stats/v1/doc.go create mode 100644 ecs-agent/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go create mode 100644 ecs-agent/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt create mode 100644 ecs-agent/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto create mode 100644 ecs-agent/vendor/github.com/containernetworking/cni/LICENSE create mode 100644 ecs-agent/vendor/github.com/containernetworking/cni/libcni/api.go create mode 100644 ecs-agent/vendor/github.com/containernetworking/cni/libcni/conf.go create mode 100644 ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/args.go create mode 100644 ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go create mode 100644 ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go create mode 100644 ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/find.go create mode 100644 ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go create mode 100644 ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go create mode 100644 ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go create mode 100644 ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/020/types.go create mode 100644 ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/040/types.go create mode 100644 ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/100/types.go create mode 100644 ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/args.go create mode 100644 ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/create/create.go create mode 100644 ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go create mode 100644 ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/internal/create.go create mode 100644 ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/types.go create mode 100644 ecs-agent/vendor/github.com/containernetworking/cni/pkg/utils/utils.go create mode 100644 ecs-agent/vendor/github.com/containernetworking/cni/pkg/version/conf.go create mode 100644 ecs-agent/vendor/github.com/containernetworking/cni/pkg/version/plugin.go create mode 100644 ecs-agent/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go create mode 100644 ecs-agent/vendor/github.com/containernetworking/cni/pkg/version/version.go create mode 100644 ecs-agent/vendor/github.com/containernetworking/plugins/LICENSE create mode 100644 
ecs-agent/vendor/github.com/containernetworking/plugins/pkg/ns/README.md create mode 100644 ecs-agent/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go create mode 100644 ecs-agent/vendor/github.com/gogo/protobuf/gogoproto/Makefile create mode 100644 ecs-agent/vendor/github.com/gogo/protobuf/gogoproto/doc.go create mode 100644 ecs-agent/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go create mode 100644 ecs-agent/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden create mode 100644 ecs-agent/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto create mode 100644 ecs-agent/vendor/github.com/gogo/protobuf/gogoproto/helper.go create mode 100644 ecs-agent/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile create mode 100644 ecs-agent/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go create mode 100644 ecs-agent/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go create mode 100644 ecs-agent/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go create mode 100644 ecs-agent/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go create mode 100644 ecs-agent/vendor/github.com/golang/groupcache/LICENSE create mode 100644 ecs-agent/vendor/github.com/golang/groupcache/lru/lru.go create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/.gitignore create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/.golangci.yml create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/.travis.yml create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/CHANGELOG.md create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/LICENSE create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/README.md create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/alt_exit.go create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/appveyor.yml create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/buffer_pool.go create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/doc.go create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/entry.go create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/exported.go create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/formatter.go create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/hooks.go create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/json_formatter.go create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/logger.go create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/logrus.go create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_js.go create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_unix.go create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_windows.go create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/text_formatter.go create mode 100644 ecs-agent/vendor/github.com/sirupsen/logrus/writer.go create mode 100644 
ecs-agent/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go create mode 100644 ecs-agent/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go create mode 100644 ecs-agent/vendor/go.opencensus.io/.gitignore create mode 100644 ecs-agent/vendor/go.opencensus.io/AUTHORS create mode 100644 ecs-agent/vendor/go.opencensus.io/CONTRIBUTING.md create mode 100644 ecs-agent/vendor/go.opencensus.io/LICENSE create mode 100644 ecs-agent/vendor/go.opencensus.io/Makefile create mode 100644 ecs-agent/vendor/go.opencensus.io/README.md create mode 100644 ecs-agent/vendor/go.opencensus.io/appveyor.yml create mode 100644 ecs-agent/vendor/go.opencensus.io/internal/internal.go create mode 100644 ecs-agent/vendor/go.opencensus.io/internal/sanitize.go create mode 100644 ecs-agent/vendor/go.opencensus.io/internal/traceinternals.go create mode 100644 ecs-agent/vendor/go.opencensus.io/opencensus.go create mode 100644 ecs-agent/vendor/go.opencensus.io/trace/basetypes.go create mode 100644 ecs-agent/vendor/go.opencensus.io/trace/config.go create mode 100644 ecs-agent/vendor/go.opencensus.io/trace/doc.go create mode 100644 ecs-agent/vendor/go.opencensus.io/trace/evictedqueue.go create mode 100644 ecs-agent/vendor/go.opencensus.io/trace/export.go create mode 100644 ecs-agent/vendor/go.opencensus.io/trace/internal/internal.go create mode 100644 ecs-agent/vendor/go.opencensus.io/trace/lrumap.go create mode 100644 ecs-agent/vendor/go.opencensus.io/trace/sampling.go create mode 100644 ecs-agent/vendor/go.opencensus.io/trace/spanbucket.go create mode 100644 ecs-agent/vendor/go.opencensus.io/trace/spanstore.go create mode 100644 ecs-agent/vendor/go.opencensus.io/trace/status_codes.go create mode 100644 ecs-agent/vendor/go.opencensus.io/trace/trace.go create mode 100644 ecs-agent/vendor/go.opencensus.io/trace/trace_api.go create mode 100644 ecs-agent/vendor/go.opencensus.io/trace/trace_go11.go create mode 100644 ecs-agent/vendor/go.opencensus.io/trace/trace_nongo11.go create mode 100644 ecs-agent/vendor/go.opencensus.io/trace/tracestate/tracestate.go create mode 100644 ecs-agent/vendor/golang.org/x/sys/unix/ioctl_signed.go rename ecs-agent/vendor/golang.org/x/sys/unix/{ioctl.go => ioctl_unsigned.go} (92%) diff --git a/agent/go.mod b/agent/go.mod index 5a1d30c6944..f28d3f0bed5 100644 --- a/agent/go.mod +++ b/agent/go.mod @@ -15,7 +15,7 @@ require ( github.com/docker/go-connections v0.4.0 github.com/docker/go-units v0.4.0 github.com/fsnotify/fsnotify v1.6.0 - github.com/golang/mock v1.4.1 + github.com/golang/mock v1.6.0 github.com/gorilla/mux v1.8.0 github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 @@ -24,9 +24,9 @@ require ( github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.11.1 github.com/prometheus/client_model v0.2.0 - github.com/stretchr/testify v1.7.0 - github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 - go.etcd.io/bbolt v1.3.5 + github.com/stretchr/testify v1.8.1 + github.com/vishvananda/netlink v1.2.1-beta.2 + go.etcd.io/bbolt v1.3.6 golang.org/x/sys v0.11.0 golang.org/x/tools v0.12.0 google.golang.org/grpc v1.53.0 @@ -34,17 +34,17 @@ require ( ) require ( - github.com/Microsoft/go-winio v0.5.2 // indirect - github.com/Microsoft/hcsshim v0.8.25 // indirect + github.com/Microsoft/go-winio v0.6.0 // indirect + github.com/Microsoft/hcsshim v0.9.9 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - 
github.com/cilium/ebpf v0.6.2 // indirect + github.com/cilium/ebpf v0.9.1 // indirect github.com/containerd/continuity v0.3.0 // indirect - github.com/coreos/go-systemd/v22 v22.3.2 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/didip/tollbooth v4.0.2+incompatible // indirect github.com/docker/distribution v2.8.2+incompatible // indirect - github.com/godbus/dbus/v5 v5.0.6 // indirect + github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/uuid v1.3.0 // indirect @@ -54,17 +54,13 @@ require ( github.com/moby/sys/mount v0.3.3 // indirect github.com/moby/sys/mountinfo v0.6.2 // indirect github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect - github.com/morikuni/aec v1.0.0 // indirect - github.com/onsi/ginkgo v1.16.5 // indirect - github.com/onsi/ginkgo/v2 v2.7.0 // indirect - github.com/onsi/gomega v1.25.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/patrickmn/go-cache v2.1.0+incompatible // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/common v0.26.0 // indirect github.com/prometheus/procfs v0.6.0 // indirect - github.com/sirupsen/logrus v1.8.1 // indirect - github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae // indirect + github.com/sirupsen/logrus v1.9.0 // indirect + github.com/vishvananda/netns v0.0.4 // indirect golang.org/x/mod v0.12.0 // indirect golang.org/x/net v0.14.0 // indirect golang.org/x/text v0.12.0 // indirect diff --git a/agent/go.sum b/agent/go.sum index 3c037e33a95..f90a26f72d6 100644 --- a/agent/go.sum +++ b/agent/go.sum @@ -1,78 +1,240 @@ bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod 
h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= -github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= +github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.25 h1:fRMwXiwk3qDwc0P05eHnh+y2v07JdtsfQ1fuAc69m9g= -github.com/Microsoft/hcsshim v0.8.25/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= 
+github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.9.9 h1:FYrTiCNOc8ZddNBVkJBxWZYm22rgxHFmxMoGK66sDF0= +github.com/Microsoft/hcsshim v0.9.9/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.36.0 h1:CscTrS+szX5iu34zk2bZrChnGO/GMtUYgMK1Xzs2hYo= github.com/aws/aws-sdk-go v1.36.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit v0.0.0-20210308162251-8959c62cb8f9 h1:BKngsktYtIiCpkptsC6xzc9ZqleZTofjqrjJWPIiVwA= github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit v0.0.0-20210308162251-8959c62cb8f9/go.mod h1:pHmn5q2flnnJgAQUoD3Hys3Fe3uoZnSwRy+Irb5Awak= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod 
h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.6.2 h1:iHsfF/t4aW4heW2YKfeHrVPGdtYTL4C4KocpM8KTSnI= github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.9.1 h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4= 
+github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= +github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= -github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod 
h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= +github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= +github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= +github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= +github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= +github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= 
+github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ= github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= +github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= github.com/containernetworking/plugins v0.9.1 h1:FD1tADPls2EEi3flPc2OegIY1M9pUa9r2Quag7HMLV8= github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= +github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= +github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= +github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= 
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= @@ -82,58 +244,118 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deniswernert/udev v0.0.0-20170418162847-a12666f7b5a1 h1:Htbvw1SXzSSLHSxmZFMO7FM9+IEwDJfsDfmtBjGbzP4= github.com/deniswernert/udev v0.0.0-20170418162847-a12666f7b5a1/go.mod h1:F2ABndSd1vxGohI2r5toC+++cB3sHTyLMvLh18/awOc= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/didip/tollbooth v4.0.2+incompatible h1:fVSa33JzSz0hoh2NxpwZtksAzAgd7zjmGO20HCZtF4M= github.com/didip/tollbooth 
v4.0.2+incompatible/go.mod h1:A9b0665CE6l1KmzpDws2++elm/CsuWBMa5Jv4WY0PEY= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.24+incompatible h1:Ugvxm7a8+Gz6vqQYQQ2W7GYq5EUPaAiuPgIfVyI3dYE= github.com/docker/docker v20.10.24+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod 
h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.19.3/go.mod 
h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro= -github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.4.1 h1:ocYkMQY5RrXTYgXl7ICpV0IXwlEQGwKIsery4gyXa1U= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= 
+github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -147,70 +369,145 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof 
v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hectane/go-acl 
v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ= github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= 
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/osext 
v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/sys/mount v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs= github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwKdqp5K0= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -219,240 +516,542 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.7.0 h1:/XxtEV3I3Eif/HobnVx9YmJgk8ENdRsuUmM+fLCFNow= -github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo= +github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y= -github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc 
v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod 
h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/russross/blackfriday/v2 v2.0.1/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 h1:cPXZWzzG0NllBLdjWoD1nDfaqu98YMv+OneaKc8sPOA= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns= +github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= +github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vishvananda/netns 
v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= +github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= 
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -463,6 +1062,7 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= @@ -472,17 +1072,27 @@ google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inf.v0 
v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -492,9 +1102,54 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.20.4/go.mod 
h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/agent/vendor/github.com/Microsoft/go-winio/.gitattributes b/agent/vendor/github.com/Microsoft/go-winio/.gitattributes new file mode 100644 index 00000000000..94f480de94e --- /dev/null +++ b/agent/vendor/github.com/Microsoft/go-winio/.gitattributes @@ -0,0 +1 @@ +* text=auto eol=lf \ No newline at end of file diff --git a/agent/vendor/github.com/Microsoft/go-winio/.gitignore b/agent/vendor/github.com/Microsoft/go-winio/.gitignore index b883f1fdc6d..815e20660e5 100644 --- a/agent/vendor/github.com/Microsoft/go-winio/.gitignore +++ b/agent/vendor/github.com/Microsoft/go-winio/.gitignore @@ -1 +1,10 @@ +.vscode/ + *.exe + +# testing +testdata + +# go workspaces +go.work +go.work.sum diff --git a/agent/vendor/github.com/Microsoft/go-winio/.golangci.yml b/agent/vendor/github.com/Microsoft/go-winio/.golangci.yml new file mode 100644 index 
00000000000..af403bb13a0 --- /dev/null +++ b/agent/vendor/github.com/Microsoft/go-winio/.golangci.yml @@ -0,0 +1,144 @@ +run: + skip-dirs: + - pkg/etw/sample + +linters: + enable: + # style + - containedctx # struct contains a context + - dupl # duplicate code + - errname # erorrs are named correctly + - goconst # strings that should be constants + - godot # comments end in a period + - misspell + - nolintlint # "//nolint" directives are properly explained + - revive # golint replacement + - stylecheck # golint replacement, less configurable than revive + - unconvert # unnecessary conversions + - wastedassign + + # bugs, performance, unused, etc ... + - contextcheck # function uses a non-inherited context + - errorlint # errors not wrapped for 1.13 + - exhaustive # check exhaustiveness of enum switch statements + - gofmt # files are gofmt'ed + - gosec # security + - nestif # deeply nested ifs + - nilerr # returns nil even with non-nil error + - prealloc # slices that can be pre-allocated + - structcheck # unused struct fields + - unparam # unused function params + +issues: + exclude-rules: + # err is very often shadowed in nested scopes + - linters: + - govet + text: '^shadow: declaration of "err" shadows declaration' + + # ignore long lines for skip autogen directives + - linters: + - revive + text: "^line-length-limit: " + source: "^//(go:generate|sys) " + + # allow unjustified ignores of error checks in defer statements + - linters: + - nolintlint + text: "^directive `//nolint:errcheck` should provide explanation" + source: '^\s*defer ' + + # allow unjustified ignores of error lints for io.EOF + - linters: + - nolintlint + text: "^directive `//nolint:errorlint` should provide explanation" + source: '[=|!]= io.EOF' + + +linters-settings: + govet: + enable-all: true + disable: + # struct order is often for Win32 compat + # also, ignore pointer bytes/GC issues for now until performance becomes an issue + - fieldalignment + check-shadowing: true + nolintlint: + allow-leading-space: false + require-explanation: true + require-specific: true + revive: + # revive is more configurable than static check, so likely the preferred alternative to static-check + # (once the perf issue is solved: https://github.com/golangci/golangci-lint/issues/2997) + enable-all-rules: + true + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md + rules: + # rules with required arguments + - name: argument-limit + disabled: true + - name: banned-characters + disabled: true + - name: cognitive-complexity + disabled: true + - name: cyclomatic + disabled: true + - name: file-header + disabled: true + - name: function-length + disabled: true + - name: function-result-limit + disabled: true + - name: max-public-structs + disabled: true + # geneally annoying rules + - name: add-constant # complains about any and all strings and integers + disabled: true + - name: confusing-naming # we frequently use "Foo()" and "foo()" together + disabled: true + - name: flag-parameter # excessive, and a common idiom we use + disabled: true + # general config + - name: line-length-limit + arguments: + - 140 + - name: var-naming + arguments: + - [] + - - CID + - CRI + - CTRD + - DACL + - DLL + - DOS + - ETW + - FSCTL + - GCS + - GMSA + - HCS + - HV + - IO + - LCOW + - LDAP + - LPAC + - LTSC + - MMIO + - NT + - OCI + - PMEM + - PWSH + - RX + - SACl + - SID + - SMB + - TX + - VHD + - VHDX + - VMID + - VPCI + - WCOW + - WIM + stylecheck: + checks: + - "all" + - "-ST1003" # use revive's var naming diff --git 
a/agent/vendor/github.com/Microsoft/go-winio/README.md b/agent/vendor/github.com/Microsoft/go-winio/README.md index 683be1dcf9c..7474b4f0b65 100644 --- a/agent/vendor/github.com/Microsoft/go-winio/README.md +++ b/agent/vendor/github.com/Microsoft/go-winio/README.md @@ -13,16 +13,60 @@ Please see the LICENSE file for licensing information. ## Contributing -This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) -declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com. +This project welcomes contributions and suggestions. +Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that +you have the right to, and actually do, grant us the rights to use your contribution. +For details, visit [Microsoft CLA](https://cla.microsoft.com). -When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR -appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. +When you submit a pull request, a CLA-bot will automatically determine whether you need to +provide a CLA and decorate the PR appropriately (e.g., label, comment). +Simply follow the instructions provided by the bot. +You will only need to do this once across all repos using our CLA. -We also require that contributors sign their commits using git commit -s or git commit --signoff to certify they either authored the work themselves -or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for more info, as well as to make sure that you can -attest to the rules listed. Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off. +Additionally, the pull request pipeline requires the following steps to be performed before +mergining. +### Code Sign-Off + +We require that contributors sign their commits using [`git commit --signoff`][git-commit-s] +to certify they either authored the work themselves or otherwise have permission to use it in this project. + +A range of commits can be signed off using [`git rebase --signoff`][git-rebase-s]. + +Please see [the developer certificate](https://developercertificate.org) for more info, +as well as to make sure that you can attest to the rules listed. +Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off. + +### Linting + +Code must pass a linting stage, which uses [`golangci-lint`][lint]. +The linting settings are stored in [`.golangci.yaml`](./.golangci.yaml), and can be run +automatically with VSCode by adding the following to your workspace or folder settings: + +```json + "go.lintTool": "golangci-lint", + "go.lintOnSave": "package", +``` + +Additional editor [integrations options are also available][lint-ide]. + +Alternatively, `golangci-lint` can be [installed locally][lint-install] and run from the repo root: + +```shell +# use . or specify a path to only lint a package +# to show all lint errors, use flags "--max-issues-per-linter=0 --max-same-issues=0" +> golangci-lint run ./... +``` + +### Go Generate + +The pipeline checks that auto-generated code, via `go generate`, are up to date. + +This can be done for the entire repo: + +```shell +> go generate ./... 
+``` ## Code of Conduct @@ -30,8 +74,16 @@ This project has adopted the [Microsoft Open Source Code of Conduct](https://ope For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. +## Special Thanks +Thanks to [natefinch][natefinch] for the inspiration for this library. +See [npipe](https://github.com/natefinch/npipe) for another named pipe implementation. -## Special Thanks -Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe -for another named pipe implementation. +[lint]: https://golangci-lint.run/ +[lint-ide]: https://golangci-lint.run/usage/integrations/#editor-integration +[lint-install]: https://golangci-lint.run/usage/install/#local-installation + +[git-commit-s]: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s +[git-rebase-s]: https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---signoff + +[natefinch]: https://github.com/natefinch diff --git a/agent/vendor/github.com/Microsoft/go-winio/SECURITY.md b/agent/vendor/github.com/Microsoft/go-winio/SECURITY.md new file mode 100644 index 00000000000..869fdfe2b24 --- /dev/null +++ b/agent/vendor/github.com/Microsoft/go-winio/SECURITY.md @@ -0,0 +1,41 @@ + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). + +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). + +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 
+ * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). + + diff --git a/agent/vendor/github.com/Microsoft/go-winio/backup.go b/agent/vendor/github.com/Microsoft/go-winio/backup.go index 2be34af4310..09621c88463 100644 --- a/agent/vendor/github.com/Microsoft/go-winio/backup.go +++ b/agent/vendor/github.com/Microsoft/go-winio/backup.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winio @@ -7,11 +8,12 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "runtime" "syscall" "unicode/utf16" + + "golang.org/x/sys/windows" ) //sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead @@ -24,7 +26,7 @@ const ( BackupAlternateData BackupLink BackupPropertyData - BackupObjectId + BackupObjectId //revive:disable-line:var-naming ID, not Id BackupReparseData BackupSparseBlock BackupTxfsData @@ -34,14 +36,16 @@ const ( StreamSparseAttributes = uint32(8) ) +//nolint:revive // var-naming: ALL_CAPS const ( - WRITE_DAC = 0x40000 - WRITE_OWNER = 0x80000 - ACCESS_SYSTEM_SECURITY = 0x1000000 + WRITE_DAC = windows.WRITE_DAC + WRITE_OWNER = windows.WRITE_OWNER + ACCESS_SYSTEM_SECURITY = windows.ACCESS_SYSTEM_SECURITY ) // BackupHeader represents a backup stream of a file. type BackupHeader struct { + //revive:disable-next-line:var-naming ID, not Id Id uint32 // The backup stream ID Attributes uint32 // Stream attributes Size int64 // The size of the stream in bytes @@ -49,8 +53,8 @@ type BackupHeader struct { Offset int64 // The offset of the stream in the file (for BackupSparseBlock only). } -type win32StreamId struct { - StreamId uint32 +type win32StreamID struct { + StreamID uint32 Attributes uint32 Size uint64 NameSize uint32 @@ -71,7 +75,7 @@ func NewBackupStreamReader(r io.Reader) *BackupStreamReader { // Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if // it was not completely read. func (r *BackupStreamReader) Next() (*BackupHeader, error) { - if r.bytesLeft > 0 { + if r.bytesLeft > 0 { //nolint:nestif // todo: flatten this if s, ok := r.r.(io.Seeker); ok { // Make sure Seek on io.SeekCurrent sometimes succeeds // before trying the actual seek. 
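For orientation, a minimal sketch of how the backup-stream helpers above fit together: OpenForBackup obtains a handle with backup semantics, BackupFileReader exposes the raw BackupRead stream, and BackupStreamReader walks the win32StreamID framing one BackupHeader at a time. The path is illustrative.

```go
//go:build windows

package main

import (
	"errors"
	"fmt"
	"io"
	"log"
	"syscall"

	"github.com/Microsoft/go-winio"
)

func main() {
	// Open with backup semantics so alternate data streams and reparse data are visible.
	f, err := winio.OpenForBackup(`C:\temp\example.txt`, // illustrative path
		syscall.GENERIC_READ, syscall.FILE_SHARE_READ, syscall.OPEN_EXISTING)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// BackupFileReader yields the raw BackupRead byte stream; false skips security descriptors.
	br := winio.NewBackupFileReader(f, false)
	defer br.Close()

	// BackupStreamReader decodes the stream framing into one BackupHeader per stream.
	r := winio.NewBackupStreamReader(br)
	for {
		hdr, err := r.Next()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("stream id=%d name=%q size=%d\n", hdr.Id, hdr.Name, hdr.Size)
	}
}
```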
@@ -82,16 +86,16 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) { r.bytesLeft = 0 } } - if _, err := io.Copy(ioutil.Discard, r); err != nil { + if _, err := io.Copy(io.Discard, r); err != nil { return nil, err } } - var wsi win32StreamId + var wsi win32StreamID if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { return nil, err } hdr := &BackupHeader{ - Id: wsi.StreamId, + Id: wsi.StreamID, Attributes: wsi.Attributes, Size: int64(wsi.Size), } @@ -102,7 +106,7 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) { } hdr.Name = syscall.UTF16ToString(name) } - if wsi.StreamId == BackupSparseBlock { + if wsi.StreamID == BackupSparseBlock { if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { return nil, err } @@ -147,8 +151,8 @@ func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { return fmt.Errorf("missing %d bytes", w.bytesLeft) } name := utf16.Encode([]rune(hdr.Name)) - wsi := win32StreamId{ - StreamId: hdr.Id, + wsi := win32StreamID{ + StreamID: hdr.Id, Attributes: hdr.Attributes, Size: uint64(hdr.Size), NameSize: uint32(len(name) * 2), @@ -203,7 +207,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) { var bytesRead uint32 err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) if err != nil { - return 0, &os.PathError{"BackupRead", r.f.Name(), err} + return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err} } runtime.KeepAlive(r.f) if bytesRead == 0 { @@ -216,7 +220,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) { // the underlying file. func (r *BackupFileReader) Close() error { if r.ctx != 0 { - backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) + _ = backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) runtime.KeepAlive(r.f) r.ctx = 0 } @@ -242,7 +246,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) { var bytesWritten uint32 err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) if err != nil { - return 0, &os.PathError{"BackupWrite", w.f.Name(), err} + return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err} } runtime.KeepAlive(w.f) if int(bytesWritten) != len(b) { @@ -255,7 +259,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) { // close the underlying file. func (w *BackupFileWriter) Close() error { if w.ctx != 0 { - backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) + _ = backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) runtime.KeepAlive(w.f) w.ctx = 0 } @@ -271,7 +275,13 @@ func OpenForBackup(path string, access uint32, share uint32, createmode uint32) if err != nil { return nil, err } - h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0) + h, err := syscall.CreateFile(&winPath[0], + access, + share, + nil, + createmode, + syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, + 0) if err != nil { err = &os.PathError{Op: "open", Path: path, Err: err} return nil, err diff --git a/agent/vendor/github.com/Microsoft/go-winio/doc.go b/agent/vendor/github.com/Microsoft/go-winio/doc.go new file mode 100644 index 00000000000..1f5bfe2d548 --- /dev/null +++ b/agent/vendor/github.com/Microsoft/go-winio/doc.go @@ -0,0 +1,22 @@ +// This package provides utilities for efficiently performing Win32 IO operations in Go. 
+// Currently, this package is provides support for genreal IO and management of +// - named pipes +// - files +// - [Hyper-V sockets] +// +// This code is similar to Go's [net] package, and uses IO completion ports to avoid +// blocking IO on system threads, allowing Go to reuse the thread to schedule other goroutines. +// +// This limits support to Windows Vista and newer operating systems. +// +// Additionally, this package provides support for: +// - creating and managing GUIDs +// - writing to [ETW] +// - opening and manageing VHDs +// - parsing [Windows Image files] +// - auto-generating Win32 API code +// +// [Hyper-V sockets]: https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service +// [ETW]: https://docs.microsoft.com/en-us/windows-hardware/drivers/devtest/event-tracing-for-windows--etw- +// [Windows Image files]: https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/work-with-windows-images +package winio diff --git a/agent/vendor/github.com/Microsoft/go-winio/ea.go b/agent/vendor/github.com/Microsoft/go-winio/ea.go index 4051c1b33bf..e104dbdfdf9 100644 --- a/agent/vendor/github.com/Microsoft/go-winio/ea.go +++ b/agent/vendor/github.com/Microsoft/go-winio/ea.go @@ -33,7 +33,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) if err != nil { err = errInvalidEaBuffer - return + return ea, nb, err } nameOffset := fileFullEaInformationSize @@ -43,7 +43,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { nextOffset := int(info.NextEntryOffset) if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { err = errInvalidEaBuffer - return + return ea, nb, err } ea.Name = string(b[nameOffset : nameOffset+nameLen]) @@ -52,7 +52,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { if info.NextEntryOffset != 0 { nb = b[info.NextEntryOffset:] } - return + return ea, nb, err } // DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION @@ -67,7 +67,7 @@ func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { eas = append(eas, ea) b = nb } - return + return eas, err } func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { diff --git a/agent/vendor/github.com/Microsoft/go-winio/file.go b/agent/vendor/github.com/Microsoft/go-winio/file.go index 293ab54c80c..175a99d3f42 100644 --- a/agent/vendor/github.com/Microsoft/go-winio/file.go +++ b/agent/vendor/github.com/Microsoft/go-winio/file.go @@ -11,6 +11,8 @@ import ( "sync/atomic" "syscall" "time" + + "golang.org/x/sys/windows" ) //sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx @@ -24,6 +26,8 @@ type atomicBool int32 func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } + +//revive:disable-next-line:predeclared Keep "new" to maintain consistency with "atomic" pkg func (b *atomicBool) swap(new bool) bool { var newInt int32 if new { @@ -32,11 +36,6 @@ func (b *atomicBool) swap(new bool) bool { return atomic.SwapInt32((*int32)(b), newInt) == 1 } -const ( - cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 - cFILE_SKIP_SET_EVENT_ON_HANDLE = 2 -) - var ( ErrFileClosed = errors.New("file has already been closed") ErrTimeout = &timeoutError{} @@ -44,28 +43,28 @@ var ( type timeoutError struct{} 
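A short round-trip sketch for the extended-attribute helpers in ea.go. It assumes the package's EncodeExtendedAttributes counterpart to the DecodeExtendedAttributes function shown above; the attribute name and value are illustrative.

```go
//go:build windows

package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/go-winio"
)

func main() {
	// Encode a list of EAs into the FILE_FULL_EA_INFORMATION wire format.
	eas := []winio.ExtendedAttribute{
		{Name: "user.comment", Value: []byte("hello")}, // illustrative EA
	}
	buf, err := winio.EncodeExtendedAttributes(eas)
	if err != nil {
		log.Fatal(err)
	}

	// Decode it back, exercising the bounds checks in parseEa.
	decoded, err := winio.DecodeExtendedAttributes(buf)
	if err != nil {
		log.Fatal(err)
	}
	for _, ea := range decoded {
		fmt.Printf("%s = %q\n", ea.Name, ea.Value)
	}
}
```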
-func (e *timeoutError) Error() string { return "i/o timeout" } -func (e *timeoutError) Timeout() bool { return true } -func (e *timeoutError) Temporary() bool { return true } +func (*timeoutError) Error() string { return "i/o timeout" } +func (*timeoutError) Timeout() bool { return true } +func (*timeoutError) Temporary() bool { return true } type timeoutChan chan struct{} var ioInitOnce sync.Once var ioCompletionPort syscall.Handle -// ioResult contains the result of an asynchronous IO operation +// ioResult contains the result of an asynchronous IO operation. type ioResult struct { bytes uint32 err error } -// ioOperation represents an outstanding asynchronous Win32 IO +// ioOperation represents an outstanding asynchronous Win32 IO. type ioOperation struct { o syscall.Overlapped ch chan ioResult } -func initIo() { +func initIO() { h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) if err != nil { panic(err) @@ -94,15 +93,15 @@ type deadlineHandler struct { timedout atomicBool } -// makeWin32File makes a new win32File from an existing file handle +// makeWin32File makes a new win32File from an existing file handle. func makeWin32File(h syscall.Handle) (*win32File, error) { f := &win32File{handle: h} - ioInitOnce.Do(initIo) + ioInitOnce.Do(initIO) _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) if err != nil { return nil, err } - err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE) + err = setFileCompletionNotificationModes(h, windows.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS|windows.FILE_SKIP_SET_EVENT_ON_HANDLE) if err != nil { return nil, err } @@ -121,14 +120,14 @@ func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { return f, nil } -// closeHandle closes the resources associated with a Win32 handle +// closeHandle closes the resources associated with a Win32 handle. func (f *win32File) closeHandle() { f.wgLock.Lock() // Atomically set that we are closing, releasing the resources only once. if !f.closing.swap(true) { f.wgLock.Unlock() // cancel all IO and wait for it to complete - cancelIoEx(f.handle, nil) + _ = cancelIoEx(f.handle, nil) f.wg.Wait() // at this point, no new IO can start syscall.Close(f.handle) @@ -144,14 +143,14 @@ func (f *win32File) Close() error { return nil } -// IsClosed checks if the file has been closed +// IsClosed checks if the file has been closed. func (f *win32File) IsClosed() bool { return f.closing.isSet() } -// prepareIo prepares for a new IO operation. +// prepareIO prepares for a new IO operation. // The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. -func (f *win32File) prepareIo() (*ioOperation, error) { +func (f *win32File) prepareIO() (*ioOperation, error) { f.wgLock.RLock() if f.closing.isSet() { f.wgLock.RUnlock() @@ -164,7 +163,7 @@ func (f *win32File) prepareIo() (*ioOperation, error) { return c, nil } -// ioCompletionProcessor processes completed async IOs forever +// ioCompletionProcessor processes completed async IOs forever. func ioCompletionProcessor(h syscall.Handle) { for { var bytes uint32 @@ -178,15 +177,17 @@ func ioCompletionProcessor(h syscall.Handle) { } } -// asyncIo processes the return value from ReadFile or WriteFile, blocking until +// todo: helsaawy - create an asyncIO version that takes a context + +// asyncIO processes the return value from ReadFile or WriteFile, blocking until // the operation has actually completed. 
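A sketch of the intended entry point for this machinery: MakeOpenFile wraps an existing handle, which needs to be opened for overlapped IO so it can be attached to the completion port created by initIO. The path is illustrative.

```go
//go:build windows

package main

import (
	"fmt"
	"io"
	"log"
	"syscall"

	"github.com/Microsoft/go-winio"
)

func main() {
	path, err := syscall.UTF16PtrFromString(`C:\temp\example.txt`) // illustrative path
	if err != nil {
		log.Fatal(err)
	}

	// FILE_FLAG_OVERLAPPED is required so Read/Write can be driven through the IOCP.
	h, err := syscall.CreateFile(path,
		syscall.GENERIC_READ,
		syscall.FILE_SHARE_READ,
		nil,
		syscall.OPEN_EXISTING,
		syscall.FILE_FLAG_OVERLAPPED,
		0)
	if err != nil {
		log.Fatal(err)
	}

	// MakeOpenFile takes ownership of the handle and returns an io.ReadWriteCloser
	// whose Read/Write block the goroutine rather than the OS thread.
	f, err := winio.MakeOpenFile(h)
	if err != nil {
		syscall.CloseHandle(h)
		log.Fatal(err)
	}
	defer f.Close()

	buf := make([]byte, 64)
	n, err := f.Read(buf)
	if err != nil && err != io.EOF {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes: %q\n", n, buf[:n])
}
```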
-func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { - if err != syscall.ERROR_IO_PENDING { +func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { + if err != syscall.ERROR_IO_PENDING { //nolint:errorlint // err is Errno return int(bytes), err } if f.closing.isSet() { - cancelIoEx(f.handle, &c.o) + _ = cancelIoEx(f.handle, &c.o) } var timeout timeoutChan @@ -200,7 +201,7 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er select { case r = <-c.ch: err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { + if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno if f.closing.isSet() { err = ErrFileClosed } @@ -210,10 +211,10 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) } case <-timeout: - cancelIoEx(f.handle, &c.o) + _ = cancelIoEx(f.handle, &c.o) r = <-c.ch err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { + if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno err = ErrTimeout } } @@ -221,13 +222,14 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er // runtime.KeepAlive is needed, as c is passed via native // code to ioCompletionProcessor, c must remain alive // until the channel read is complete. + // todo: (de)allocate *ioOperation via win32 heap functions, instead of needing to KeepAlive? runtime.KeepAlive(c) return int(r.bytes), err } // Read reads from a file handle. func (f *win32File) Read(b []byte) (int, error) { - c, err := f.prepareIo() + c, err := f.prepareIO() if err != nil { return 0, err } @@ -239,13 +241,13 @@ func (f *win32File) Read(b []byte) (int, error) { var bytes uint32 err = syscall.ReadFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIo(c, &f.readDeadline, bytes, err) + n, err := f.asyncIO(c, &f.readDeadline, bytes, err) runtime.KeepAlive(b) // Handle EOF conditions. if err == nil && n == 0 && len(b) != 0 { return 0, io.EOF - } else if err == syscall.ERROR_BROKEN_PIPE { + } else if err == syscall.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno return 0, io.EOF } else { return n, err @@ -254,7 +256,7 @@ func (f *win32File) Read(b []byte) (int, error) { // Write writes to a file handle. func (f *win32File) Write(b []byte) (int, error) { - c, err := f.prepareIo() + c, err := f.prepareIO() if err != nil { return 0, err } @@ -266,7 +268,7 @@ func (f *win32File) Write(b []byte) (int, error) { var bytes uint32 err = syscall.WriteFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIo(c, &f.writeDeadline, bytes, err) + n, err := f.asyncIO(c, &f.writeDeadline, bytes, err) runtime.KeepAlive(b) return n, err } diff --git a/agent/vendor/github.com/Microsoft/go-winio/fileinfo.go b/agent/vendor/github.com/Microsoft/go-winio/fileinfo.go index 3ab6bff69c5..702950e72a4 100644 --- a/agent/vendor/github.com/Microsoft/go-winio/fileinfo.go +++ b/agent/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winio @@ -14,13 +15,18 @@ import ( type FileBasicInfo struct { CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime FileAttributes uint32 - pad uint32 // padding + _ uint32 // padding } // GetFileBasicInfo retrieves times and attributes for a file. 
func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { bi := &FileBasicInfo{} - if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + if err := windows.GetFileInformationByHandleEx( + windows.Handle(f.Fd()), + windows.FileBasicInfo, + (*byte)(unsafe.Pointer(bi)), + uint32(unsafe.Sizeof(*bi)), + ); err != nil { return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} } runtime.KeepAlive(f) @@ -29,7 +35,12 @@ func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { // SetFileBasicInfo sets times and attributes for a file. func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { - if err := windows.SetFileInformationByHandle(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + if err := windows.SetFileInformationByHandle( + windows.Handle(f.Fd()), + windows.FileBasicInfo, + (*byte)(unsafe.Pointer(bi)), + uint32(unsafe.Sizeof(*bi)), + ); err != nil { return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} } runtime.KeepAlive(f) @@ -48,7 +59,10 @@ type FileStandardInfo struct { // GetFileStandardInfo retrieves ended information for the file. func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) { si := &FileStandardInfo{} - if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileStandardInfo, (*byte)(unsafe.Pointer(si)), uint32(unsafe.Sizeof(*si))); err != nil { + if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), + windows.FileStandardInfo, + (*byte)(unsafe.Pointer(si)), + uint32(unsafe.Sizeof(*si))); err != nil { return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} } runtime.KeepAlive(f) @@ -65,7 +79,12 @@ type FileIDInfo struct { // GetFileID retrieves the unique (volume, file ID) pair for a file. func GetFileID(f *os.File) (*FileIDInfo, error) { fileID := &FileIDInfo{} - if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileIdInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { + if err := windows.GetFileInformationByHandleEx( + windows.Handle(f.Fd()), + windows.FileIdInfo, + (*byte)(unsafe.Pointer(fileID)), + uint32(unsafe.Sizeof(*fileID)), + ); err != nil { return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} } runtime.KeepAlive(f) diff --git a/agent/vendor/github.com/Microsoft/go-winio/hvsock.go b/agent/vendor/github.com/Microsoft/go-winio/hvsock.go index b2b644d002a..52f1c280f6a 100644 --- a/agent/vendor/github.com/Microsoft/go-winio/hvsock.go +++ b/agent/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -4,6 +4,8 @@ package winio import ( + "context" + "errors" "fmt" "io" "net" @@ -12,16 +14,87 @@ import ( "time" "unsafe" + "golang.org/x/sys/windows" + + "github.com/Microsoft/go-winio/internal/socket" "github.com/Microsoft/go-winio/pkg/guid" ) -//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind +const afHVSock = 34 // AF_HYPERV -const ( - afHvSock = 34 // AF_HYPERV +// Well known Service and VM IDs +//https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards - socketError = ^uintptr(0) -) +// HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions. 
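A small usage sketch for the fileinfo helpers. The FileIDInfo field names (VolumeSerialNumber, FileID) are assumed from the struct referenced above, and the path is illustrative.

```go
//go:build windows

package main

import (
	"fmt"
	"log"
	"os"
	"time"

	"github.com/Microsoft/go-winio"
)

func main() {
	f, err := os.Open(`C:\Windows\notepad.exe`) // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Times and attributes via FILE_BASIC_INFO.
	bi, err := winio.GetFileBasicInfo(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("attributes: 0x%x, last write: %v\n",
		bi.FileAttributes, time.Unix(0, bi.LastWriteTime.Nanoseconds()))

	// The (volume serial, 128-bit file ID) pair uniquely identifies the file on its volume.
	id, err := winio.GetFileID(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("volume %d, file ID %x\n", id.VolumeSerialNumber, id.FileID)
}
```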
+func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000 + return guid.GUID{} +} + +// HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions. +func HvsockGUIDBroadcast() guid.GUID { //ffffffff-ffff-ffff-ffff-ffffffffffff + return guid.GUID{ + Data1: 0xffffffff, + Data2: 0xffff, + Data3: 0xffff, + Data4: [8]uint8{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + } +} + +// HvsockGUIDLoopback is the Loopback VmId for accepting connections to the same partition as the connector. +func HvsockGUIDLoopback() guid.GUID { // e0e16197-dd56-4a10-9195-5ee7a155a838 + return guid.GUID{ + Data1: 0xe0e16197, + Data2: 0xdd56, + Data3: 0x4a10, + Data4: [8]uint8{0x91, 0x95, 0x5e, 0xe7, 0xa1, 0x55, 0xa8, 0x38}, + } +} + +// HvsockGUIDSiloHost is the address of a silo's host partition: +// - The silo host of a hosted silo is the utility VM. +// - The silo host of a silo on a physical host is the physical host. +func HvsockGUIDSiloHost() guid.GUID { // 36bd0c5c-7276-4223-88ba-7d03b654c568 + return guid.GUID{ + Data1: 0x36bd0c5c, + Data2: 0x7276, + Data3: 0x4223, + Data4: [8]byte{0x88, 0xba, 0x7d, 0x03, 0xb6, 0x54, 0xc5, 0x68}, + } +} + +// HvsockGUIDChildren is the wildcard VmId for accepting connections from the connector's child partitions. +func HvsockGUIDChildren() guid.GUID { // 90db8b89-0d35-4f79-8ce9-49ea0ac8b7cd + return guid.GUID{ + Data1: 0x90db8b89, + Data2: 0xd35, + Data3: 0x4f79, + Data4: [8]uint8{0x8c, 0xe9, 0x49, 0xea, 0xa, 0xc8, 0xb7, 0xcd}, + } +} + +// HvsockGUIDParent is the wildcard VmId for accepting connections from the connector's parent partition. +// Listening on this VmId accepts connection from: +// - Inside silos: silo host partition. +// - Inside hosted silo: host of the VM. +// - Inside VM: VM host. +// - Physical host: Not supported. +func HvsockGUIDParent() guid.GUID { // a42e7cda-d03f-480c-9cc2-a4de20abb878 + return guid.GUID{ + Data1: 0xa42e7cda, + Data2: 0xd03f, + Data3: 0x480c, + Data4: [8]uint8{0x9c, 0xc2, 0xa4, 0xde, 0x20, 0xab, 0xb8, 0x78}, + } +} + +// hvsockVsockServiceTemplate is the Service GUID used for the VSOCK protocol. +func hvsockVsockServiceTemplate() guid.GUID { // 00000000-facb-11e6-bd58-64006a7986d3 + return guid.GUID{ + Data2: 0xfacb, + Data3: 0x11e6, + Data4: [8]uint8{0xbd, 0x58, 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3}, + } +} // An HvsockAddr is an address for a AF_HYPERV socket. type HvsockAddr struct { @@ -36,8 +109,10 @@ type rawHvsockAddr struct { ServiceID guid.GUID } +var _ socket.RawSockaddr = &rawHvsockAddr{} + // Network returns the address's network name, "hvsock". -func (addr *HvsockAddr) Network() string { +func (*HvsockAddr) Network() string { return "hvsock" } @@ -47,14 +122,14 @@ func (addr *HvsockAddr) String() string { // VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port. func VsockServiceID(port uint32) guid.GUID { - g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3") + g := hvsockVsockServiceTemplate() // make a copy g.Data1 = port return g } func (addr *HvsockAddr) raw() rawHvsockAddr { return rawHvsockAddr{ - Family: afHvSock, + Family: afHVSock, VMID: addr.VMID, ServiceID: addr.ServiceID, } @@ -65,20 +140,48 @@ func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) { addr.ServiceID = raw.ServiceID } +// Sockaddr returns a pointer to and the size of this struct. +// +// Implements the [socket.RawSockaddr] interface, and allows use in +// [socket.Bind] and [socket.ConnectEx]. 
+func (r *rawHvsockAddr) Sockaddr() (unsafe.Pointer, int32, error) { + return unsafe.Pointer(r), int32(unsafe.Sizeof(rawHvsockAddr{})), nil +} + +// Sockaddr interface allows use with `sockets.Bind()` and `.ConnectEx()`. +func (r *rawHvsockAddr) FromBytes(b []byte) error { + n := int(unsafe.Sizeof(rawHvsockAddr{})) + + if len(b) < n { + return fmt.Errorf("got %d, want %d: %w", len(b), n, socket.ErrBufferSize) + } + + copy(unsafe.Slice((*byte)(unsafe.Pointer(r)), n), b[:n]) + if r.Family != afHVSock { + return fmt.Errorf("got %d, want %d: %w", r.Family, afHVSock, socket.ErrAddrFamily) + } + + return nil +} + // HvsockListener is a socket listener for the AF_HYPERV address family. type HvsockListener struct { sock *win32File addr HvsockAddr } +var _ net.Listener = &HvsockListener{} + // HvsockConn is a connected socket of the AF_HYPERV address family. type HvsockConn struct { sock *win32File local, remote HvsockAddr } -func newHvSocket() (*win32File, error) { - fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1) +var _ net.Conn = &HvsockConn{} + +func newHVSocket() (*win32File, error) { + fd, err := syscall.Socket(afHVSock, syscall.SOCK_STREAM, 1) if err != nil { return nil, os.NewSyscallError("socket", err) } @@ -94,12 +197,12 @@ func newHvSocket() (*win32File, error) { // ListenHvsock listens for connections on the specified hvsock address. func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { l := &HvsockListener{addr: *addr} - sock, err := newHvSocket() + sock, err := newHVSocket() if err != nil { return nil, l.opErr("listen", err) } sa := addr.raw() - err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa))) + err = socket.Bind(windows.Handle(sock.handle), &sa) if err != nil { return nil, l.opErr("listen", os.NewSyscallError("socket", err)) } @@ -121,7 +224,7 @@ func (l *HvsockListener) Addr() net.Addr { // Accept waits for the next connection and returns it. func (l *HvsockListener) Accept() (_ net.Conn, err error) { - sock, err := newHvSocket() + sock, err := newHVSocket() if err != nil { return nil, l.opErr("accept", err) } @@ -130,27 +233,42 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) { sock.Close() } }() - c, err := l.sock.prepareIo() + c, err := l.sock.prepareIO() if err != nil { return nil, l.opErr("accept", err) } defer l.sock.wg.Done() // AcceptEx, per documentation, requires an extra 16 bytes per address. + // + // https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-acceptex const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{})) var addrbuf [addrlen * 2]byte var bytes uint32 - err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o) - _, err = l.sock.asyncIo(c, nil, bytes, err) - if err != nil { + err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /*rxdatalen*/, addrlen, addrlen, &bytes, &c.o) + if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil { return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) } + conn := &HvsockConn{ sock: sock, } + // The local address returned in the AcceptEx buffer is the same as the Listener socket's + // address. However, the service GUID reported by GetSockName is different from the Listeners + // socket, and is sometimes the same as the local address of the socket that dialed the + // address, with the service GUID.Data1 incremented, but othertimes is different. + // todo: does the local address matter? is the listener's address or the actual address appropriate? 
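Tying the hvsock pieces together, a minimal listener sketch: the wildcard VmId accepts connections from any partition, and VsockServiceID maps an AF_VSOCK-style port onto the service GUID template. Port 8080 is arbitrary.

```go
//go:build windows

package main

import (
	"log"

	"github.com/Microsoft/go-winio"
)

func main() {
	// Accept from all partitions on VSOCK port 8080.
	addr := &winio.HvsockAddr{
		VMID:      winio.HvsockGUIDWildcard(),
		ServiceID: winio.VsockServiceID(8080),
	}

	l, err := winio.ListenHvsock(addr)
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	for {
		conn, err := l.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go func() {
			defer conn.Close()
			// Echo the listener address back to the peer.
			_, _ = conn.Write([]byte("hello from " + l.Addr().String() + "\n"))
		}()
	}
}
```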
conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0]))) conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) + + // initialize the accepted socket and update its properties with those of the listening socket + if err = windows.Setsockopt(windows.Handle(sock.handle), + windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT, + (*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil { + return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err)) + } + sock = nil return conn, nil } @@ -160,43 +278,171 @@ func (l *HvsockListener) Close() error { return l.sock.Close() } -/* Need to finish ConnectEx handling -func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) { - sock, err := newHvSocket() +// HvsockDialer configures and dials a Hyper-V Socket (ie, [HvsockConn]). +type HvsockDialer struct { + // Deadline is the time the Dial operation must connect before erroring. + Deadline time.Time + + // Retries is the number of additional connects to try if the connection times out, is refused, + // or the host is unreachable + Retries uint + + // RetryWait is the time to wait after a connection error to retry + RetryWait time.Duration + + rt *time.Timer // redial wait timer +} + +// Dial the Hyper-V socket at addr. +// +// See [HvsockDialer.Dial] for more information. +func Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) { + return (&HvsockDialer{}).Dial(ctx, addr) +} + +// Dial attempts to connect to the Hyper-V socket at addr, and returns a connection if successful. +// Will attempt (HvsockDialer).Retries if dialing fails, waiting (HvsockDialer).RetryWait between +// retries. +// +// Dialing can be cancelled either by providing (HvsockDialer).Deadline, or cancelling ctx. 
+func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) { + op := "dial" + // create the conn early to use opErr() + conn = &HvsockConn{ + remote: *addr, + } + + if !d.Deadline.IsZero() { + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, d.Deadline) + defer cancel() + } + + // preemptive timeout/cancellation check + if err = ctx.Err(); err != nil { + return nil, conn.opErr(op, err) + } + + sock, err := newHVSocket() if err != nil { - return nil, err + return nil, conn.opErr(op, err) } defer func() { if sock != nil { sock.Close() } }() - c, err := sock.prepareIo() + + sa := addr.raw() + err = socket.Bind(windows.Handle(sock.handle), &sa) if err != nil { - return nil, err + return nil, conn.opErr(op, os.NewSyscallError("bind", err)) + } + + c, err := sock.prepareIO() + if err != nil { + return nil, conn.opErr(op, err) } defer sock.wg.Done() var bytes uint32 - err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o) - _, err = sock.asyncIo(ctx, c, nil, bytes, err) + for i := uint(0); i <= d.Retries; i++ { + err = socket.ConnectEx( + windows.Handle(sock.handle), + &sa, + nil, // sendBuf + 0, // sendDataLen + &bytes, + (*windows.Overlapped)(unsafe.Pointer(&c.o))) + _, err = sock.asyncIO(c, nil, bytes, err) + if i < d.Retries && canRedial(err) { + if err = d.redialWait(ctx); err == nil { + continue + } + } + break + } if err != nil { - return nil, err + return nil, conn.opErr(op, os.NewSyscallError("connectex", err)) } - conn := &HvsockConn{ - sock: sock, - remote: *addr, + + // update the connection properties, so shutdown can be used + if err = windows.Setsockopt( + windows.Handle(sock.handle), + windows.SOL_SOCKET, + windows.SO_UPDATE_CONNECT_CONTEXT, + nil, // optvalue + 0, // optlen + ); err != nil { + return nil, conn.opErr(op, os.NewSyscallError("setsockopt", err)) + } + + // get the local name + var sal rawHvsockAddr + err = socket.GetSockName(windows.Handle(sock.handle), &sal) + if err != nil { + return nil, conn.opErr(op, os.NewSyscallError("getsockname", err)) + } + conn.local.fromRaw(&sal) + + // one last check for timeout, since asyncIO doesn't check the context + if err = ctx.Err(); err != nil { + return nil, conn.opErr(op, err) } + + conn.sock = sock sock = nil + return conn, nil } -*/ + +// redialWait waits before attempting to redial, resetting the timer as appropriate. +func (d *HvsockDialer) redialWait(ctx context.Context) (err error) { + if d.RetryWait == 0 { + return nil + } + + if d.rt == nil { + d.rt = time.NewTimer(d.RetryWait) + } else { + // should already be stopped and drained + d.rt.Reset(d.RetryWait) + } + + select { + case <-ctx.Done(): + case <-d.rt.C: + return nil + } + + // stop and drain the timer + if !d.rt.Stop() { + <-d.rt.C + } + return ctx.Err() +} + +// assumes error is a plain, unwrapped syscall.Errno provided by direct syscall. 
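The dialing side, as a sketch of HvsockDialer's retry behaviour: transient connect failures recognized by canRedial are retried up to Retries additional times with RetryWait between attempts, bounded by Deadline or context cancellation. Port and timeouts are illustrative.

```go
//go:build windows

package main

import (
	"context"
	"log"
	"time"

	"github.com/Microsoft/go-winio"
)

func main() {
	addr := &winio.HvsockAddr{
		VMID:      winio.HvsockGUIDLoopback(), // same-partition loopback
		ServiceID: winio.VsockServiceID(8080),
	}

	// Retry refused/unreachable/timed-out connects up to 3 extra times,
	// waiting a second between attempts, and give up after 10 seconds overall.
	d := &winio.HvsockDialer{
		Deadline:  time.Now().Add(10 * time.Second),
		Retries:   3,
		RetryWait: time.Second,
	}

	conn, err := d.Dial(context.Background(), addr)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if err := conn.SetDeadline(time.Now().Add(5 * time.Second)); err != nil {
		log.Fatal(err)
	}
	if _, err := conn.Write([]byte("ping\n")); err != nil {
		log.Fatal(err)
	}
}
```

The package-level Dial shown above is equivalent to calling Dial on a zero-value HvsockDialer, i.e. a single attempt with no retry wait.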
+func canRedial(err error) bool { + //nolint:errorlint // guaranteed to be an Errno + switch err { + case windows.WSAECONNREFUSED, windows.WSAENETUNREACH, windows.WSAETIMEDOUT, + windows.ERROR_CONNECTION_REFUSED, windows.ERROR_CONNECTION_UNAVAIL: + return true + default: + return false + } +} func (conn *HvsockConn) opErr(op string, err error) error { + // translate from "file closed" to "socket closed" + if errors.Is(err, ErrFileClosed) { + err = socket.ErrSocketClosed + } return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} } func (conn *HvsockConn) Read(b []byte) (int, error) { - c, err := conn.sock.prepareIo() + c, err := conn.sock.prepareIO() if err != nil { return 0, conn.opErr("read", err) } @@ -204,10 +450,11 @@ func (conn *HvsockConn) Read(b []byte) (int, error) { buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} var flags, bytes uint32 err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) - n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err) + n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err) if err != nil { - if _, ok := err.(syscall.Errno); ok { - err = os.NewSyscallError("wsarecv", err) + var eno windows.Errno + if errors.As(err, &eno) { + err = os.NewSyscallError("wsarecv", eno) } return 0, conn.opErr("read", err) } else if n == 0 { @@ -230,7 +477,7 @@ func (conn *HvsockConn) Write(b []byte) (int, error) { } func (conn *HvsockConn) write(b []byte) (int, error) { - c, err := conn.sock.prepareIo() + c, err := conn.sock.prepareIO() if err != nil { return 0, conn.opErr("write", err) } @@ -238,10 +485,11 @@ func (conn *HvsockConn) write(b []byte) (int, error) { buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} var bytes uint32 err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) - n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err) + n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err) if err != nil { - if _, ok := err.(syscall.Errno); ok { - err = os.NewSyscallError("wsasend", err) + var eno windows.Errno + if errors.As(err, &eno) { + err = os.NewSyscallError("wsasend", eno) } return 0, conn.opErr("write", err) } @@ -257,13 +505,19 @@ func (conn *HvsockConn) IsClosed() bool { return conn.sock.IsClosed() } +// shutdown disables sending or receiving on a socket. func (conn *HvsockConn) shutdown(how int) error { if conn.IsClosed() { - return ErrFileClosed + return socket.ErrSocketClosed } err := syscall.Shutdown(conn.sock.handle, how) if err != nil { + // If the connection was closed, shutdowns fail with "not connected" + if errors.Is(err, windows.WSAENOTCONN) || + errors.Is(err, windows.WSAESHUTDOWN) { + err = socket.ErrSocketClosed + } return os.NewSyscallError("shutdown", err) } return nil @@ -273,7 +527,7 @@ func (conn *HvsockConn) shutdown(how int) error { func (conn *HvsockConn) CloseRead() error { err := conn.shutdown(syscall.SHUT_RD) if err != nil { - return conn.opErr("close", err) + return conn.opErr("closeread", err) } return nil } @@ -283,7 +537,7 @@ func (conn *HvsockConn) CloseRead() error { func (conn *HvsockConn) CloseWrite() error { err := conn.shutdown(syscall.SHUT_WR) if err != nil { - return conn.opErr("close", err) + return conn.opErr("closewrite", err) } return nil } @@ -300,8 +554,13 @@ func (conn *HvsockConn) RemoteAddr() net.Addr { // SetDeadline implements the net.Conn SetDeadline method. 
func (conn *HvsockConn) SetDeadline(t time.Time) error { - conn.SetReadDeadline(t) - conn.SetWriteDeadline(t) + // todo: implement `SetDeadline` for `win32File` + if err := conn.SetReadDeadline(t); err != nil { + return fmt.Errorf("set read deadline: %w", err) + } + if err := conn.SetWriteDeadline(t); err != nil { + return fmt.Errorf("set write deadline: %w", err) + } return nil } diff --git a/agent/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go b/agent/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go new file mode 100644 index 00000000000..7e82f9afa95 --- /dev/null +++ b/agent/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go @@ -0,0 +1,20 @@ +package socket + +import ( + "unsafe" +) + +// RawSockaddr allows structs to be used with [Bind] and [ConnectEx]. The +// struct must meet the Win32 sockaddr requirements specified here: +// https://docs.microsoft.com/en-us/windows/win32/winsock/sockaddr-2 +// +// Specifically, the struct size must be least larger than an int16 (unsigned short) +// for the address family. +type RawSockaddr interface { + // Sockaddr returns a pointer to the RawSockaddr and its struct size, allowing + // for the RawSockaddr's data to be overwritten by syscalls (if necessary). + // + // It is the callers responsibility to validate that the values are valid; invalid + // pointers or size can cause a panic. + Sockaddr() (unsafe.Pointer, int32, error) +} diff --git a/agent/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go b/agent/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go new file mode 100644 index 00000000000..39e8c05f8f3 --- /dev/null +++ b/agent/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go @@ -0,0 +1,179 @@ +//go:build windows + +package socket + +import ( + "errors" + "fmt" + "net" + "sync" + "syscall" + "unsafe" + + "github.com/Microsoft/go-winio/pkg/guid" + "golang.org/x/sys/windows" +) + +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go socket.go + +//sys getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getsockname +//sys getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getpeername +//sys bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind + +const socketError = uintptr(^uint32(0)) + +var ( + // todo(helsaawy): create custom error types to store the desired vs actual size and addr family? + + ErrBufferSize = errors.New("buffer size") + ErrAddrFamily = errors.New("address family") + ErrInvalidPointer = errors.New("invalid pointer") + ErrSocketClosed = fmt.Errorf("socket closed: %w", net.ErrClosed) +) + +// todo(helsaawy): replace these with generics, ie: GetSockName[S RawSockaddr](s windows.Handle) (S, error) + +// GetSockName writes the local address of socket s to the [RawSockaddr] rsa. +// If rsa is not large enough, the [windows.WSAEFAULT] is returned. +func GetSockName(s windows.Handle, rsa RawSockaddr) error { + ptr, l, err := rsa.Sockaddr() + if err != nil { + return fmt.Errorf("could not retrieve socket pointer and size: %w", err) + } + + // although getsockname returns WSAEFAULT if the buffer is too small, it does not set + // &l to the correct size, so--apart from doubling the buffer repeatedly--there is no remedy + return getsockname(s, ptr, &l) +} + +// GetPeerName returns the remote address the socket is connected to. 
+// +// See [GetSockName] for more information. +func GetPeerName(s windows.Handle, rsa RawSockaddr) error { + ptr, l, err := rsa.Sockaddr() + if err != nil { + return fmt.Errorf("could not retrieve socket pointer and size: %w", err) + } + + return getpeername(s, ptr, &l) +} + +func Bind(s windows.Handle, rsa RawSockaddr) (err error) { + ptr, l, err := rsa.Sockaddr() + if err != nil { + return fmt.Errorf("could not retrieve socket pointer and size: %w", err) + } + + return bind(s, ptr, l) +} + +// "golang.org/x/sys/windows".ConnectEx and .Bind only accept internal implementations of the +// their sockaddr interface, so they cannot be used with HvsockAddr +// Replicate functionality here from +// https://cs.opensource.google/go/x/sys/+/master:windows/syscall_windows.go + +// The function pointers to `AcceptEx`, `ConnectEx` and `GetAcceptExSockaddrs` must be loaded at +// runtime via a WSAIoctl call: +// https://docs.microsoft.com/en-us/windows/win32/api/Mswsock/nc-mswsock-lpfn_connectex#remarks + +type runtimeFunc struct { + id guid.GUID + once sync.Once + addr uintptr + err error +} + +func (f *runtimeFunc) Load() error { + f.once.Do(func() { + var s windows.Handle + s, f.err = windows.Socket(windows.AF_INET, windows.SOCK_STREAM, windows.IPPROTO_TCP) + if f.err != nil { + return + } + defer windows.CloseHandle(s) //nolint:errcheck + + var n uint32 + f.err = windows.WSAIoctl(s, + windows.SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&f.id)), + uint32(unsafe.Sizeof(f.id)), + (*byte)(unsafe.Pointer(&f.addr)), + uint32(unsafe.Sizeof(f.addr)), + &n, + nil, //overlapped + 0, //completionRoutine + ) + }) + return f.err +} + +var ( + // todo: add `AcceptEx` and `GetAcceptExSockaddrs` + WSAID_CONNECTEX = guid.GUID{ //revive:disable-line:var-naming ALL_CAPS + Data1: 0x25a207b9, + Data2: 0xddf3, + Data3: 0x4660, + Data4: [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}, + } + + connectExFunc = runtimeFunc{id: WSAID_CONNECTEX} +) + +func ConnectEx( + fd windows.Handle, + rsa RawSockaddr, + sendBuf *byte, + sendDataLen uint32, + bytesSent *uint32, + overlapped *windows.Overlapped, +) error { + if err := connectExFunc.Load(); err != nil { + return fmt.Errorf("failed to load ConnectEx function pointer: %w", err) + } + ptr, n, err := rsa.Sockaddr() + if err != nil { + return err + } + return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped) +} + +// BOOL LpfnConnectex( +// [in] SOCKET s, +// [in] const sockaddr *name, +// [in] int namelen, +// [in, optional] PVOID lpSendBuffer, +// [in] DWORD dwSendDataLength, +// [out] LPDWORD lpdwBytesSent, +// [in] LPOVERLAPPED lpOverlapped +// ) + +func connectEx( + s windows.Handle, + name unsafe.Pointer, + namelen int32, + sendBuf *byte, + sendDataLen uint32, + bytesSent *uint32, + overlapped *windows.Overlapped, +) (err error) { + // todo: after upgrading to 1.18, switch from syscall.Syscall9 to syscall.SyscallN + r1, _, e1 := syscall.Syscall9(connectExFunc.addr, + 7, + uintptr(s), + uintptr(name), + uintptr(namelen), + uintptr(unsafe.Pointer(sendBuf)), + uintptr(sendDataLen), + uintptr(unsafe.Pointer(bytesSent)), + uintptr(unsafe.Pointer(overlapped)), + 0, + 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return err +} diff --git a/agent/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go b/agent/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go new file mode 100644 index 00000000000..6d2e1a9e443 --- /dev/null +++ 
b/agent/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go @@ -0,0 +1,72 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package socket + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") + + procbind = modws2_32.NewProc("bind") + procgetpeername = modws2_32.NewProc("getpeername") + procgetsockname = modws2_32.NewProc("getsockname") +) + +func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socketError { + err = errnoErr(e1) + } + return +} + +func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) + if r1 == socketError { + err = errnoErr(e1) + } + return +} + +func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) + if r1 == socketError { + err = errnoErr(e1) + } + return +} diff --git a/agent/vendor/github.com/Microsoft/go-winio/pipe.go b/agent/vendor/github.com/Microsoft/go-winio/pipe.go index 96700a73de2..ca6e38fc000 100644 --- a/agent/vendor/github.com/Microsoft/go-winio/pipe.go +++ b/agent/vendor/github.com/Microsoft/go-winio/pipe.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winio @@ -13,6 +14,8 @@ import ( "syscall" "time" "unsafe" + + "golang.org/x/sys/windows" ) //sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe @@ -21,10 +24,10 @@ import ( //sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo //sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc -//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile -//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb -//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U -//sys 
rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl +//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile +//sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb +//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U +//sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl type ioStatusBlock struct { Status, Information uintptr @@ -51,45 +54,22 @@ type securityDescriptor struct { Control uint16 Owner uintptr Group uintptr - Sacl uintptr - Dacl uintptr + Sacl uintptr //revive:disable-line:var-naming SACL, not Sacl + Dacl uintptr //revive:disable-line:var-naming DACL, not Dacl } -type ntstatus int32 +type ntStatus int32 -func (status ntstatus) Err() error { +func (status ntStatus) Err() error { if status >= 0 { return nil } return rtlNtStatusToDosError(status) } -const ( - cERROR_PIPE_BUSY = syscall.Errno(231) - cERROR_NO_DATA = syscall.Errno(232) - cERROR_PIPE_CONNECTED = syscall.Errno(535) - cERROR_SEM_TIMEOUT = syscall.Errno(121) - - cSECURITY_SQOS_PRESENT = 0x100000 - cSECURITY_ANONYMOUS = 0 - - cPIPE_TYPE_MESSAGE = 4 - - cPIPE_READMODE_MESSAGE = 2 - - cFILE_OPEN = 1 - cFILE_CREATE = 2 - - cFILE_PIPE_MESSAGE_TYPE = 1 - cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2 - - cSE_DACL_PRESENT = 4 -) - var ( // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. - // This error should match net.errClosing since docker takes a dependency on its text. - ErrPipeListenerClosed = errors.New("use of closed network connection") + ErrPipeListenerClosed = net.ErrClosed errPipeWriteClosed = errors.New("pipe has been closed for write") ) @@ -116,9 +96,10 @@ func (f *win32Pipe) RemoteAddr() net.Addr { } func (f *win32Pipe) SetDeadline(t time.Time) error { - f.SetReadDeadline(t) - f.SetWriteDeadline(t) - return nil + if err := f.SetReadDeadline(t); err != nil { + return err + } + return f.SetWriteDeadline(t) } // CloseWrite closes the write side of a message pipe in byte mode. @@ -157,14 +138,14 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) { return 0, io.EOF } n, err := f.win32File.Read(b) - if err == io.EOF { + if err == io.EOF { //nolint:errorlint // If this was the result of a zero-byte read, then // it is possible that the read was due to a zero-size // message. Since we are simulating CloseWrite with a // zero-byte message, ensure that all future Read() calls // also return EOF. f.readEOF = true - } else if err == syscall.ERROR_MORE_DATA { + } else if err == syscall.ERROR_MORE_DATA { //nolint:errorlint // err is Errno // ERROR_MORE_DATA indicates that the pipe's read mode is message mode // and the message still has more bytes. Treat this as a success, since // this package presents all named pipes as byte streams. @@ -173,7 +154,7 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) { return n, err } -func (s pipeAddress) Network() string { +func (pipeAddress) Network() string { return "pipe" } @@ -184,16 +165,21 @@ func (s pipeAddress) String() string { // tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. 
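One practical consequence of aliasing ErrPipeListenerClosed to net.ErrClosed in the hunk above: callers can now match a closed listener with the standard sentinel. A hedged sketch, using a made-up pipe name and a nil PipeConfig:

package main

import (
	"errors"
	"log"
	"net"
	"time"

	"github.com/Microsoft/go-winio"
)

func main() {
	l, err := winio.ListenPipe(`\\.\pipe\example`, nil) // hypothetical pipe name
	if err != nil {
		log.Fatal(err)
	}
	go func() {
		time.Sleep(100 * time.Millisecond)
		l.Close()
	}()
	if _, err := l.Accept(); errors.Is(err, net.ErrClosed) {
		// Also matched by errors.Is(err, winio.ErrPipeListenerClosed) after this change.
		log.Println("listener closed")
	}
}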
func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) { for { - select { case <-ctx.Done(): return syscall.Handle(0), ctx.Err() default: - h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + h, err := createFile(*path, + access, + 0, + nil, + syscall.OPEN_EXISTING, + windows.FILE_FLAG_OVERLAPPED|windows.SECURITY_SQOS_PRESENT|windows.SECURITY_ANONYMOUS, + 0) if err == nil { return h, nil } - if err != cERROR_PIPE_BUSY { + if err != windows.ERROR_PIPE_BUSY { //nolint:errorlint // err is Errno return h, &os.PathError{Err: err, Op: "open", Path: *path} } // Wait 10 msec and try again. This is a rather simplistic @@ -213,9 +199,10 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { } else { absTimeout = time.Now().Add(2 * time.Second) } - ctx, _ := context.WithDeadline(context.Background(), absTimeout) + ctx, cancel := context.WithDeadline(context.Background(), absTimeout) + defer cancel() conn, err := DialPipeContext(ctx, path) - if err == context.DeadlineExceeded { + if errors.Is(err, context.DeadlineExceeded) { return nil, ErrTimeout } return conn, err @@ -251,7 +238,7 @@ func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, // If the pipe is in message mode, return a message byte pipe, which // supports CloseWrite(). - if flags&cPIPE_TYPE_MESSAGE != 0 { + if flags&windows.PIPE_TYPE_MESSAGE != 0 { return &win32MessageBytePipe{ win32Pipe: win32Pipe{win32File: f, path: path}, }, nil @@ -283,7 +270,11 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy oa.Length = unsafe.Sizeof(oa) var ntPath unicodeString - if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil { + if err := rtlDosPathNameToNtPathName(&path16[0], + &ntPath, + 0, + 0, + ).Err(); err != nil { return 0, &os.PathError{Op: "open", Path: path, Err: err} } defer localFree(ntPath.Buffer) @@ -292,8 +283,8 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy // The security descriptor is only needed for the first pipe. if first { if sd != nil { - len := uint32(len(sd)) - sdb := localAlloc(0, len) + l := uint32(len(sd)) + sdb := localAlloc(0, l) defer localFree(sdb) copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) @@ -301,28 +292,28 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy // Construct the default named pipe security descriptor. 
var dacl uintptr if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { - return 0, fmt.Errorf("getting default named pipe ACL: %s", err) + return 0, fmt.Errorf("getting default named pipe ACL: %w", err) } defer localFree(dacl) sdb := &securityDescriptor{ Revision: 1, - Control: cSE_DACL_PRESENT, + Control: windows.SE_DACL_PRESENT, Dacl: dacl, } oa.SecurityDescriptor = sdb } } - typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS) + typ := uint32(windows.FILE_PIPE_REJECT_REMOTE_CLIENTS) if c.MessageMode { - typ |= cFILE_PIPE_MESSAGE_TYPE + typ |= windows.FILE_PIPE_MESSAGE_TYPE } - disposition := uint32(cFILE_OPEN) + disposition := uint32(windows.FILE_OPEN) access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE) if first { - disposition = cFILE_CREATE + disposition = windows.FILE_CREATE // By not asking for read or write access, the named pipe file system // will put this pipe into an initially disconnected state, blocking // client connections until the next call with first == false. @@ -335,7 +326,20 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy h syscall.Handle iosb ioStatusBlock ) - err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err() + err = ntCreateNamedPipeFile(&h, + access, + &oa, + &iosb, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, + disposition, + 0, + typ, + 0, + 0, + 0xffffffff, + uint32(c.InputBufferSize), + uint32(c.OutputBufferSize), + &timeout).Err() if err != nil { return 0, &os.PathError{Op: "open", Path: path, Err: err} } @@ -380,7 +384,7 @@ func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) { p.Close() p = nil err = <-ch - if err == nil || err == ErrFileClosed { + if err == nil || err == ErrFileClosed { //nolint:errorlint // err is Errno err = ErrPipeListenerClosed } } @@ -402,12 +406,12 @@ func (l *win32PipeListener) listenerRoutine() { p, err = l.makeConnectedServerPipe() // If the connection was immediately closed by the client, try // again. - if err != cERROR_NO_DATA { + if err != windows.ERROR_NO_DATA { //nolint:errorlint // err is Errno break } } responseCh <- acceptResponse{p, err} - closed = err == ErrPipeListenerClosed + closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno } } syscall.Close(l.firstHandle) @@ -469,15 +473,15 @@ func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { } func connectPipe(p *win32File) error { - c, err := p.prepareIo() + c, err := p.prepareIO() if err != nil { return err } defer p.wg.Done() err = connectNamedPipe(p.handle, &c.o) - _, err = p.asyncIo(c, nil, 0, err) - if err != nil && err != cERROR_PIPE_CONNECTED { + _, err = p.asyncIO(c, nil, 0, err) + if err != nil && err != windows.ERROR_PIPE_CONNECTED { //nolint:errorlint // err is Errno return err } return nil diff --git a/agent/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/agent/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go index 2d9161e2dee..48ce4e92436 100644 --- a/agent/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go +++ b/agent/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go @@ -1,5 +1,3 @@ -// +build windows - // Package guid provides a GUID type. The backing structure for a GUID is // identical to that used by the golang.org/x/sys/windows GUID type. 
// There are two main binary encodings used for a GUID, the big-endian encoding, @@ -9,24 +7,26 @@ package guid import ( "crypto/rand" - "crypto/sha1" + "crypto/sha1" //nolint:gosec // not used for secure application "encoding" "encoding/binary" "fmt" "strconv" ) +//go:generate go run golang.org/x/tools/cmd/stringer -type=Variant -trimprefix=Variant -linecomment + // Variant specifies which GUID variant (or "type") of the GUID. It determines // how the entirety of the rest of the GUID is interpreted. type Variant uint8 -// The variants specified by RFC 4122. +// The variants specified by RFC 4122 section 4.1.1. const ( // VariantUnknown specifies a GUID variant which does not conform to one of // the variant encodings specified in RFC 4122. VariantUnknown Variant = iota VariantNCS - VariantRFC4122 + VariantRFC4122 // RFC 4122 VariantMicrosoft VariantFuture ) @@ -36,6 +36,10 @@ const ( // hash of an input string. type Version uint8 +func (v Version) String() string { + return strconv.FormatUint(uint64(v), 10) +} + var _ = (encoding.TextMarshaler)(GUID{}) var _ = (encoding.TextUnmarshaler)(&GUID{}) @@ -61,7 +65,7 @@ func NewV4() (GUID, error) { // big-endian UTF16 stream of bytes. If that is desired, the string can be // encoded as such before being passed to this function. func NewV5(namespace GUID, name []byte) (GUID, error) { - b := sha1.New() + b := sha1.New() //nolint:gosec // not used for secure application namespaceBytes := namespace.ToArray() b.Write(namespaceBytes[:]) b.Write(name) diff --git a/agent/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go b/agent/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go index f64d828c0ba..805bd354842 100644 --- a/agent/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go +++ b/agent/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package guid diff --git a/agent/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go b/agent/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go index 83617f4eee9..27e45ee5ccf 100644 --- a/agent/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go +++ b/agent/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go @@ -1,3 +1,6 @@ +//go:build windows +// +build windows + package guid import "golang.org/x/sys/windows" diff --git a/agent/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go b/agent/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go new file mode 100644 index 00000000000..4076d3132fd --- /dev/null +++ b/agent/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go @@ -0,0 +1,27 @@ +// Code generated by "stringer -type=Variant -trimprefix=Variant -linecomment"; DO NOT EDIT. + +package guid + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[VariantUnknown-0] + _ = x[VariantNCS-1] + _ = x[VariantRFC4122-2] + _ = x[VariantMicrosoft-3] + _ = x[VariantFuture-4] +} + +const _Variant_name = "UnknownNCSRFC 4122MicrosoftFuture" + +var _Variant_index = [...]uint8{0, 7, 10, 18, 27, 33} + +func (i Variant) String() string { + if i >= Variant(len(_Variant_index)-1) { + return "Variant(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Variant_name[_Variant_index[i]:_Variant_index[i+1]] +} diff --git a/agent/vendor/github.com/Microsoft/go-winio/privilege.go b/agent/vendor/github.com/Microsoft/go-winio/privilege.go index c3dd7c21769..0ff9dac906d 100644 --- a/agent/vendor/github.com/Microsoft/go-winio/privilege.go +++ b/agent/vendor/github.com/Microsoft/go-winio/privilege.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winio @@ -24,22 +25,17 @@ import ( //sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW const ( - SE_PRIVILEGE_ENABLED = 2 + //revive:disable-next-line:var-naming ALL_CAPS + SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED - ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 + //revive:disable-next-line:var-naming ALL_CAPS + ERROR_NOT_ALL_ASSIGNED syscall.Errno = windows.ERROR_NOT_ALL_ASSIGNED SeBackupPrivilege = "SeBackupPrivilege" SeRestorePrivilege = "SeRestorePrivilege" SeSecurityPrivilege = "SeSecurityPrivilege" ) -const ( - securityAnonymous = iota - securityIdentification - securityImpersonation - securityDelegation -) - var ( privNames = make(map[string]uint64) privNameMutex sync.Mutex @@ -51,11 +47,9 @@ type PrivilegeError struct { } func (e *PrivilegeError) Error() string { - s := "" + s := "Could not enable privilege " if len(e.privileges) > 1 { s = "Could not enable privileges " - } else { - s = "Could not enable privilege " } for i, p := range e.privileges { if i != 0 { @@ -94,7 +88,7 @@ func RunWithPrivileges(names []string, fn func() error) error { } func mapPrivileges(names []string) ([]uint64, error) { - var privileges []uint64 + privileges := make([]uint64, 0, len(names)) privNameMutex.Lock() defer privNameMutex.Unlock() for _, name := range names { @@ -127,7 +121,7 @@ func enableDisableProcessPrivilege(names []string, action uint32) error { return err } - p, _ := windows.GetCurrentProcess() + p := windows.CurrentProcess() var token windows.Token err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token) if err != nil { @@ -140,10 +134,10 @@ func enableDisableProcessPrivilege(names []string, action uint32) error { func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error { var b bytes.Buffer - binary.Write(&b, binary.LittleEndian, uint32(len(privileges))) + _ = binary.Write(&b, binary.LittleEndian, uint32(len(privileges))) for _, p := range privileges { - binary.Write(&b, binary.LittleEndian, p) - binary.Write(&b, binary.LittleEndian, action) + _ = binary.Write(&b, binary.LittleEndian, p) + _ = binary.Write(&b, binary.LittleEndian, action) } prevState := make([]byte, b.Len()) reqSize := uint32(0) @@ -151,7 +145,7 @@ func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) e if !success { return err } - if err == ERROR_NOT_ALL_ASSIGNED { + if err == ERROR_NOT_ALL_ASSIGNED { //nolint:errorlint // err is Errno return &PrivilegeError{privileges} } return nil @@ -177,7 +171,7 @@ func getPrivilegeName(luid uint64) string { } func newThreadToken() (windows.Token, error) { - 
err := impersonateSelf(securityImpersonation) + err := impersonateSelf(windows.SecurityImpersonation) if err != nil { return 0, err } diff --git a/agent/vendor/github.com/Microsoft/go-winio/reparse.go b/agent/vendor/github.com/Microsoft/go-winio/reparse.go index fc1ee4d3a3e..67d1a104a63 100644 --- a/agent/vendor/github.com/Microsoft/go-winio/reparse.go +++ b/agent/vendor/github.com/Microsoft/go-winio/reparse.go @@ -1,3 +1,6 @@ +//go:build windows +// +build windows + package winio import ( @@ -113,16 +116,16 @@ func EncodeReparsePoint(rp *ReparsePoint) []byte { } var b bytes.Buffer - binary.Write(&b, binary.LittleEndian, &data) + _ = binary.Write(&b, binary.LittleEndian, &data) if !rp.IsMountPoint { flags := uint32(0) if relative { flags |= 1 } - binary.Write(&b, binary.LittleEndian, flags) + _ = binary.Write(&b, binary.LittleEndian, flags) } - binary.Write(&b, binary.LittleEndian, ntTarget16) - binary.Write(&b, binary.LittleEndian, target16) + _ = binary.Write(&b, binary.LittleEndian, ntTarget16) + _ = binary.Write(&b, binary.LittleEndian, target16) return b.Bytes() } diff --git a/agent/vendor/github.com/Microsoft/go-winio/sd.go b/agent/vendor/github.com/Microsoft/go-winio/sd.go index db1b370a1b5..5550ef6b61e 100644 --- a/agent/vendor/github.com/Microsoft/go-winio/sd.go +++ b/agent/vendor/github.com/Microsoft/go-winio/sd.go @@ -1,23 +1,25 @@ +//go:build windows // +build windows package winio import ( + "errors" "syscall" "unsafe" + + "golang.org/x/sys/windows" ) //sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW +//sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW //sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW +//sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW //sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW //sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW //sys localFree(mem uintptr) = LocalFree //sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength -const ( - cERROR_NONE_MAPPED = syscall.Errno(1332) -) - type AccountLookupError struct { Name string Err error @@ -28,8 +30,10 @@ func (e *AccountLookupError) Error() string { return "lookup account: empty account name specified" } var s string - switch e.Err { - case cERROR_NONE_MAPPED: + switch { + case errors.Is(e.Err, windows.ERROR_INVALID_SID): + s = "the security ID structure is invalid" + case errors.Is(e.Err, windows.ERROR_NONE_MAPPED): s = "not found" default: s = e.Err.Error() @@ -37,6 +41,8 @@ func (e *AccountLookupError) Error() string { return "lookup account " + e.Name + ": " + s } +func (e *AccountLookupError) Unwrap() error { return e.Err } + type SddlConversionError struct { Sddl string Err error @@ -46,15 +52,19 @@ func (e *SddlConversionError) Error() string { return "convert " + e.Sddl + ": " + e.Err.Error() } +func (e *SddlConversionError) Unwrap() error { return e.Err } + // LookupSidByName looks up the 
SID of an account by name +// +//revive:disable-next-line:var-naming SID, not Sid func LookupSidByName(name string) (sid string, err error) { if name == "" { - return "", &AccountLookupError{name, cERROR_NONE_MAPPED} + return "", &AccountLookupError{name, windows.ERROR_NONE_MAPPED} } var sidSize, sidNameUse, refDomainSize uint32 err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) - if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { + if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno return "", &AccountLookupError{name, err} } sidBuffer := make([]byte, sidSize) @@ -73,6 +83,42 @@ func LookupSidByName(name string) (sid string, err error) { return sid, nil } +// LookupNameBySid looks up the name of an account by SID +// +//revive:disable-next-line:var-naming SID, not Sid +func LookupNameBySid(sid string) (name string, err error) { + if sid == "" { + return "", &AccountLookupError{sid, windows.ERROR_NONE_MAPPED} + } + + sidBuffer, err := windows.UTF16PtrFromString(sid) + if err != nil { + return "", &AccountLookupError{sid, err} + } + + var sidPtr *byte + if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil { + return "", &AccountLookupError{sid, err} + } + defer localFree(uintptr(unsafe.Pointer(sidPtr))) + + var nameSize, refDomainSize, sidNameUse uint32 + err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse) + if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno + return "", &AccountLookupError{sid, err} + } + + nameBuffer := make([]uint16, nameSize) + refDomainBuffer := make([]uint16, refDomainSize) + err = lookupAccountSid(nil, sidPtr, &nameBuffer[0], &nameSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) + if err != nil { + return "", &AccountLookupError{sid, err} + } + + name = windows.UTF16ToString(nameBuffer) + return name, nil +} + func SddlToSecurityDescriptor(sddl string) ([]byte, error) { var sdBuffer uintptr err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil) @@ -87,7 +133,7 @@ func SddlToSecurityDescriptor(sddl string) ([]byte, error) { func SecurityDescriptorToSddl(sd []byte) (string, error) { var sddl *uint16 - // The returned string length seems to including an aribtrary number of terminating NULs. + // The returned string length seems to include an arbitrary number of terminating NULs. // Don't use it. 
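The new LookupNameBySid complements the existing LookupSidByName. A hedged round-trip sketch; the "Everyone" group and its S-1-1-0 SID are an assumption about the local system, not something this diff guarantees:

package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/go-winio"
)

func main() {
	sid, err := winio.LookupSidByName("Everyone") // assumption: the well-known "Everyone" group resolves here
	if err != nil {
		log.Fatal(err)
	}
	name, err := winio.LookupNameBySid(sid)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s -> %s\n", sid, name) // e.g. "S-1-1-0 -> Everyone"
}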
err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil) if err != nil { diff --git a/agent/vendor/github.com/Microsoft/go-winio/syscall.go b/agent/vendor/github.com/Microsoft/go-winio/syscall.go index 5955c99fdea..a6ca111b39c 100644 --- a/agent/vendor/github.com/Microsoft/go-winio/syscall.go +++ b/agent/vendor/github.com/Microsoft/go-winio/syscall.go @@ -1,3 +1,5 @@ +//go:build windows + package winio -//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./*.go diff --git a/agent/vendor/github.com/Microsoft/go-winio/tools.go b/agent/vendor/github.com/Microsoft/go-winio/tools.go new file mode 100644 index 00000000000..2aa045843ea --- /dev/null +++ b/agent/vendor/github.com/Microsoft/go-winio/tools.go @@ -0,0 +1,5 @@ +//go:build tools + +package winio + +import _ "golang.org/x/tools/cmd/stringer" diff --git a/agent/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/agent/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go index 176ff75e320..83f45a1351b 100644 --- a/agent/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go +++ b/agent/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -1,4 +1,6 @@ -// Code generated by 'go generate'; DO NOT EDIT. +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. package winio @@ -47,9 +49,11 @@ var ( procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") @@ -74,7 +78,6 @@ var ( procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") - procbind = modws2_32.NewProc("bind") ) func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { @@ -123,6 +126,14 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision return } +func convertStringSidToSid(str *uint16, sid **byte) (err error) { + r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func getSecurityDescriptorLength(sd uintptr) (len uint32) { r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) len = uint32(r0) @@ -154,6 +165,14 @@ func 
_lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidS return } +func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { var _p0 *uint16 _p0, err = syscall.UTF16PtrFromString(systemName) @@ -380,25 +399,25 @@ func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err erro return } -func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) { +func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) { r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) - status = ntstatus(r0) + status = ntStatus(r0) return } -func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) { +func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) { r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) - status = ntstatus(r0) + status = ntStatus(r0) return } -func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) { +func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) { r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) - status = ntstatus(r0) + status = ntStatus(r0) return } -func rtlNtStatusToDosError(status ntstatus) (winerr error) { +func rtlNtStatusToDosError(status ntStatus) (winerr error) { r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) if r0 != 0 { winerr = syscall.Errno(r0) @@ -417,11 +436,3 @@ func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint } return } - -func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) - if r1 == socketError { - err = errnoErr(e1) - } - return -} diff --git a/agent/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/agent/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go index e9267b9554f..75dce5d821d 100644 --- 
a/agent/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go +++ b/agent/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go @@ -35,4 +35,16 @@ const ( // V20H2 corresponds to Windows Server 20H2 (semi-annual channel). V20H2 = 19042 + + // V21H1 corresponds to Windows Server 21H1 (semi-annual channel). + V21H1 = 19043 + + // V21H2Win10 corresponds to Windows 10 (November 2021 Update). + V21H2Win10 = 19044 + + // V21H2Server corresponds to Windows Server 2022 (ltsc2022). + V21H2Server = 20348 + + // V21H2Win11 corresponds to Windows 11 (original release). + V21H2Win11 = 22000 ) diff --git a/agent/vendor/github.com/cilium/ebpf/.gitignore b/agent/vendor/github.com/cilium/ebpf/.gitignore index 38b15653c0f..b46162b8ec3 100644 --- a/agent/vendor/github.com/cilium/ebpf/.gitignore +++ b/agent/vendor/github.com/cilium/ebpf/.gitignore @@ -5,6 +5,7 @@ *.so *.dylib *.o +!*_bpf*.o # Test binary, build with `go test -c` *.test diff --git a/agent/vendor/github.com/cilium/ebpf/.golangci.yaml b/agent/vendor/github.com/cilium/ebpf/.golangci.yaml index a88374197ec..dc62dd6d0fc 100644 --- a/agent/vendor/github.com/cilium/ebpf/.golangci.yaml +++ b/agent/vendor/github.com/cilium/ebpf/.golangci.yaml @@ -24,6 +24,5 @@ linters: # Could be enabled later: # - gocyclo - # - prealloc # - maligned # - gosec diff --git a/agent/vendor/github.com/cilium/ebpf/ARCHITECTURE.md b/agent/vendor/github.com/cilium/ebpf/ARCHITECTURE.md index aee9c0a0d4d..8cd7e2486e7 100644 --- a/agent/vendor/github.com/cilium/ebpf/ARCHITECTURE.md +++ b/agent/vendor/github.com/cilium/ebpf/ARCHITECTURE.md @@ -57,7 +57,7 @@ Objects loading a spec will fail because the kernel is too old, or a feature is not enabled. There are multiple ways the library deals with that: -* Fallback: older kernels don't allowing naming programs and maps. The library +* Fallback: older kernels don't allow naming programs and maps. The library automatically detects support for names, and omits them during load if necessary. This works since name is primarily a debug aid. @@ -68,7 +68,7 @@ enabled. There are multiple ways the library deals with that: Once program and map objects are loaded they expose the kernel's low-level API, e.g. `NextKey`. Often this API is awkward to use in Go, so there are safer wrappers on top of the low-level API, like `MapIterator`. The low-level API is -useful as an out when our higher-level API doesn't support a particular use case. +useful when our higher-level API doesn't support a particular use case. Links --- @@ -78,3 +78,9 @@ tend to use bpf_link to do so. Older hooks unfortunately use a combination of syscalls, netlink messages, etc. Adding support for a new link type should not pull in large dependencies like netlink, so XDP programs or tracepoints are out of scope. + +Each bpf_link_type has one corresponding Go type, e.g. `link.tracing` corresponds +to BPF_LINK_TRACING. In general, these types should be unexported as long as they +don't export methods outside of the Link interface. Each Go type may have multiple +exported constructors. For example `AttachTracing` and `AttachLSM` create a +tracing link, but are distinct functions since they may require different arguments. diff --git a/agent/vendor/github.com/cilium/ebpf/CONTRIBUTING.md b/agent/vendor/github.com/cilium/ebpf/CONTRIBUTING.md index 72ceb437829..0d29eae81e3 100644 --- a/agent/vendor/github.com/cilium/ebpf/CONTRIBUTING.md +++ b/agent/vendor/github.com/cilium/ebpf/CONTRIBUTING.md @@ -6,8 +6,8 @@ are welcome. 
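Relating back to the build-number constants added to osversion/windowsbuilds.go above, a hedged sketch of gating behaviour on Windows Server 2022. It assumes the package's Build() helper, which is not part of this diff:

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/osversion"
)

func main() {
	// Assumption: osversion.Build() reports the running Windows build number.
	if osversion.Build() >= osversion.V21H2Server {
		fmt.Println("Windows Server 2022 (build 20348) or newer")
	}
}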
Please take a look at [the architecture](ARCHITECTURE.md) to get a better understanding for the high-level goals. New features must be accompanied by tests. Before starting work on any large -feature, please [join](https://cilium.herokuapp.com/) the -[#libbpf-go](https://cilium.slack.com/messages/libbpf-go) channel on Slack to +feature, please [join](https://ebpf.io/slack) the +[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel on Slack to discuss the design first. When submitting pull requests, consider writing details about what problem you diff --git a/agent/vendor/github.com/cilium/ebpf/MAINTAINERS.md b/agent/vendor/github.com/cilium/ebpf/MAINTAINERS.md new file mode 100644 index 00000000000..9c18e7e76f5 --- /dev/null +++ b/agent/vendor/github.com/cilium/ebpf/MAINTAINERS.md @@ -0,0 +1,8 @@ +# Maintainers + + * [Lorenz Bauer] + * [Timo Beckers] (Isovalent) + + +[Lorenz Bauer]: https://github.com/lmb +[Timo Beckers]: https://github.com/ti-mo diff --git a/agent/vendor/github.com/cilium/ebpf/Makefile b/agent/vendor/github.com/cilium/ebpf/Makefile index 5dd342c5b28..2d5f04c370e 100644 --- a/agent/vendor/github.com/cilium/ebpf/Makefile +++ b/agent/vendor/github.com/cilium/ebpf/Makefile @@ -1,70 +1,110 @@ # The development version of clang is distributed as the 'clang' binary, # while stable/released versions have a version number attached. # Pin the default clang to a stable version. -CLANG ?= clang-12 -CFLAGS := -target bpf -O2 -g -Wall -Werror $(CFLAGS) +CLANG ?= clang-14 +STRIP ?= llvm-strip-14 +OBJCOPY ?= llvm-objcopy-14 +CFLAGS := -O2 -g -Wall -Werror $(CFLAGS) + +CI_KERNEL_URL ?= https://github.com/cilium/ci-kernels/raw/master/ # Obtain an absolute path to the directory of the Makefile. # Assume the Makefile is in the root of the repository. REPODIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) UIDGID := $(shell stat -c '%u:%g' ${REPODIR}) +# Prefer podman if installed, otherwise use docker. +# Note: Setting the var at runtime will always override. +CONTAINER_ENGINE ?= $(if $(shell command -v podman), podman, docker) +CONTAINER_RUN_ARGS ?= $(if $(filter ${CONTAINER_ENGINE}, podman), --log-driver=none, --user "${UIDGID}") + IMAGE := $(shell cat ${REPODIR}/testdata/docker/IMAGE) VERSION := $(shell cat ${REPODIR}/testdata/docker/VERSION) + # clang <8 doesn't tag relocs properly (STT_NOTYPE) # clang 9 is the first version emitting BTF TARGETS := \ testdata/loader-clang-7 \ testdata/loader-clang-9 \ testdata/loader-$(CLANG) \ + testdata/btf_map_init \ testdata/invalid_map \ testdata/raw_tracepoint \ testdata/invalid_map_static \ - testdata/initialized_btf_map \ + testdata/invalid_btf_map_init \ testdata/strings \ - internal/btf/testdata/relocs + testdata/freplace \ + testdata/iproute2_map_compat \ + testdata/map_spin_lock \ + testdata/subprog_reloc \ + testdata/fwd_decl \ + btf/testdata/relocs \ + btf/testdata/relocs_read \ + btf/testdata/relocs_read_tgt -.PHONY: all clean docker-all docker-shell +.PHONY: all clean container-all container-shell generate -.DEFAULT_TARGET = docker-all +.DEFAULT_TARGET = container-all -# Build all ELF binaries using a Dockerized LLVM toolchain. -docker-all: - docker run --rm --user "${UIDGID}" \ +# Build all ELF binaries using a containerized LLVM toolchain. +container-all: + ${CONTAINER_ENGINE} run --rm ${CONTAINER_RUN_ARGS} \ -v "${REPODIR}":/ebpf -w /ebpf --env MAKEFLAGS \ --env CFLAGS="-fdebug-prefix-map=/ebpf=." 
\ + --env HOME="/tmp" \ "${IMAGE}:${VERSION}" \ - make all + $(MAKE) all -# (debug) Drop the user into a shell inside the Docker container as root. -docker-shell: - docker run --rm -ti \ +# (debug) Drop the user into a shell inside the container as root. +container-shell: + ${CONTAINER_ENGINE} run --rm -ti \ -v "${REPODIR}":/ebpf -w /ebpf \ "${IMAGE}:${VERSION}" clean: -$(RM) testdata/*.elf - -$(RM) internal/btf/testdata/*.elf + -$(RM) btf/testdata/*.elf -all: $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) +format: + find . -type f -name "*.c" | xargs clang-format -i + +all: format $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) generate ln -srf testdata/loader-$(CLANG)-el.elf testdata/loader-el.elf ln -srf testdata/loader-$(CLANG)-eb.elf testdata/loader-eb.elf +# $BPF_CLANG is used in go:generate invocations. +generate: export BPF_CLANG := $(CLANG) +generate: export BPF_CFLAGS := $(CFLAGS) +generate: + go generate ./cmd/bpf2go/test + go generate ./internal/sys + cd examples/ && go generate ./... + testdata/loader-%-el.elf: testdata/loader.c - $* $(CFLAGS) -mlittle-endian -c $< -o $@ + $* $(CFLAGS) -target bpfel -c $< -o $@ + $(STRIP) -g $@ testdata/loader-%-eb.elf: testdata/loader.c - $* $(CFLAGS) -mbig-endian -c $< -o $@ + $* $(CFLAGS) -target bpfeb -c $< -o $@ + $(STRIP) -g $@ %-el.elf: %.c - $(CLANG) $(CFLAGS) -mlittle-endian -c $< -o $@ + $(CLANG) $(CFLAGS) -target bpfel -c $< -o $@ + $(STRIP) -g $@ %-eb.elf : %.c - $(CLANG) $(CFLAGS) -mbig-endian -c $< -o $@ + $(CLANG) $(CFLAGS) -target bpfeb -c $< -o $@ + $(STRIP) -g $@ -# Usage: make VMLINUX=/path/to/vmlinux vmlinux-btf -.PHONY: vmlinux-btf -vmlinux-btf: internal/btf/testdata/vmlinux-btf.gz -internal/btf/testdata/vmlinux-btf.gz: $(VMLINUX) - objcopy --dump-section .BTF=/dev/stdout "$<" /dev/null | gzip > "$@" +.PHONY: generate-btf +generate-btf: KERNEL_VERSION?=5.18 +generate-btf: + $(eval TMP := $(shell mktemp -d)) + curl -fL "$(CI_KERNEL_URL)/linux-$(KERNEL_VERSION).bz" -o "$(TMP)/bzImage" + ./testdata/extract-vmlinux "$(TMP)/bzImage" > "$(TMP)/vmlinux" + $(OBJCOPY) --dump-section .BTF=/dev/stdout "$(TMP)/vmlinux" /dev/null | gzip > "btf/testdata/vmlinux.btf.gz" + curl -fL "$(CI_KERNEL_URL)/linux-$(KERNEL_VERSION)-selftests-bpf.tgz" -o "$(TMP)/selftests.tgz" + tar -xf "$(TMP)/selftests.tgz" --to-stdout tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.ko | \ + $(OBJCOPY) --dump-section .BTF="btf/testdata/btf_testmod.btf" - /dev/null + $(RM) -r "$(TMP)" diff --git a/agent/vendor/github.com/cilium/ebpf/README.md b/agent/vendor/github.com/cilium/ebpf/README.md index 76c3c303bb1..3e490de7110 100644 --- a/agent/vendor/github.com/cilium/ebpf/README.md +++ b/agent/vendor/github.com/cilium/ebpf/README.md @@ -2,28 +2,16 @@ [![PkgGoDev](https://pkg.go.dev/badge/github.com/cilium/ebpf)](https://pkg.go.dev/github.com/cilium/ebpf) +![HoneyGopher](.github/images/cilium-ebpf.png) + eBPF is a pure Go library that provides utilities for loading, compiling, and debugging eBPF programs. It has minimal external dependencies and is intended to be used in long running processes. 
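To make the library description above concrete, a minimal, hedged loading sketch using the top-level ebpf and rlimit packages the README refers to; the object file name and its contents are made up:

package main

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/rlimit"
)

func main() {
	// Lift RLIMIT_MEMLOCK on kernels before 5.11, as the rlimit package documents.
	if err := rlimit.RemoveMemlock(); err != nil {
		log.Fatal(err)
	}
	spec, err := ebpf.LoadCollectionSpec("bpf_prog.o") // hypothetical object file
	if err != nil {
		log.Fatal(err)
	}
	coll, err := ebpf.NewCollection(spec)
	if err != nil {
		log.Fatal(err)
	}
	defer coll.Close()
	log.Printf("loaded programs: %v", coll.Programs) // program name -> *ebpf.Program
}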
-* [asm](https://pkg.go.dev/github.com/cilium/ebpf/asm) contains a basic - assembler -* [link](https://pkg.go.dev/github.com/cilium/ebpf/link) allows attaching eBPF - to various hooks -* [perf](https://pkg.go.dev/github.com/cilium/ebpf/perf) allows reading from a - `PERF_EVENT_ARRAY` -* [cmd/bpf2go](https://pkg.go.dev/github.com/cilium/ebpf/cmd/bpf2go) allows - compiling and embedding eBPF programs in Go code - The library is maintained by [Cloudflare](https://www.cloudflare.com) and -[Cilium](https://www.cilium.io). Feel free to -[join](https://cilium.herokuapp.com/) the -[#libbpf-go](https://cilium.slack.com/messages/libbpf-go) channel on Slack. - -## Current status +[Cilium](https://www.cilium.io). -The package is production ready, but **the API is explicitly unstable right -now**. Expect to update your code if you want to follow along. +See [ebpf.io](https://ebpf.io) for other projects from the eBPF ecosystem. ## Getting Started @@ -33,21 +21,41 @@ your own tools can be found under [examples/](examples/). Contributions are highly encouraged, as they highlight certain use cases of eBPF and the library, and help shape the future of the project. +## Getting Help + +Please +[join](https://ebpf.io/slack) the +[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel on Slack if you +have questions regarding the library. + +## Packages + +This library includes the following packages: + +* [asm](https://pkg.go.dev/github.com/cilium/ebpf/asm) contains a basic + assembler, allowing you to write eBPF assembly instructions directly + within your Go code. (You don't need to use this if you prefer to write your eBPF program in C.) +* [cmd/bpf2go](https://pkg.go.dev/github.com/cilium/ebpf/cmd/bpf2go) allows + compiling and embedding eBPF programs written in C within Go code. As well as + compiling the C code, it auto-generates Go code for loading and manipulating + the eBPF program and map objects. +* [link](https://pkg.go.dev/github.com/cilium/ebpf/link) allows attaching eBPF + to various hooks +* [perf](https://pkg.go.dev/github.com/cilium/ebpf/perf) allows reading from a + `PERF_EVENT_ARRAY` +* [ringbuf](https://pkg.go.dev/github.com/cilium/ebpf/ringbuf) allows reading from a + `BPF_MAP_TYPE_RINGBUF` map +* [features](https://pkg.go.dev/github.com/cilium/ebpf/features) implements the equivalent + of `bpftool feature probe` for discovering BPF-related kernel features using native Go. +* [rlimit](https://pkg.go.dev/github.com/cilium/ebpf/rlimit) provides a convenient API to lift + the `RLIMIT_MEMLOCK` constraint on kernels before 5.11. + ## Requirements * A version of Go that is [supported by upstream](https://golang.org/doc/devel/release.html#policy) -* Linux 4.9, 4.19 or 5.4 (versions in-between should work, but are not tested) - -## Useful resources - -* [eBPF.io](https://ebpf.io) (recommended) -* [Cilium eBPF documentation](https://docs.cilium.io/en/latest/bpf/#bpf-guide) - (recommended) -* [Linux documentation on - BPF](https://www.kernel.org/doc/html/latest/networking/filter.html) -* [eBPF features by Linux - version](https://github.com/iovisor/bcc/blob/master/docs/kernel-versions.md) +* Linux >= 4.9. CI is run against kernel.org LTS releases. 4.4 should work but is + not tested against. ## Regenerating Testdata @@ -55,8 +63,15 @@ Run `make` in the root of this repository to rebuild testdata in all subpackages. This requires Docker, as it relies on a standardized build environment to keep the build output stable. 
+It is possible to regenerate data using Podman by overriding the `CONTAINER_*` +variables: `CONTAINER_ENGINE=podman CONTAINER_RUN_ARGS= make`. + The toolchain image build files are kept in [testdata/docker/](testdata/docker/). ## License MIT + +### eBPF Gopher + +The eBPF honeygopher is based on the Go gopher designed by Renee French. diff --git a/agent/vendor/github.com/cilium/ebpf/asm/func.go b/agent/vendor/github.com/cilium/ebpf/asm/func.go index aee2c7ac810..a14e9e2c3ce 100644 --- a/agent/vendor/github.com/cilium/ebpf/asm/func.go +++ b/agent/vendor/github.com/cilium/ebpf/asm/func.go @@ -5,6 +5,10 @@ package asm // BuiltinFunc is a built-in eBPF function. type BuiltinFunc int32 +func (_ BuiltinFunc) Max() BuiltinFunc { + return maxBuiltinFunc - 1 +} + // eBPF built-in functions // // You can regenerate this list using the following gawk script: @@ -184,6 +188,49 @@ const ( FnKtimeGetCoarseNs FnImaInodeHash FnSockFromFile + FnCheckMtu + FnForEachMapElem + FnSnprintf + FnSysBpf + FnBtfFindByNameKind + FnSysClose + FnTimerInit + FnTimerSetCallback + FnTimerStart + FnTimerCancel + FnGetFuncIp + FnGetAttachCookie + FnTaskPtRegs + FnGetBranchSnapshot + FnTraceVprintk + FnSkcToUnixSock + FnKallsymsLookupName + FnFindVma + FnLoop + FnStrncmp + FnGetFuncArg + FnGetFuncRet + FnGetFuncArgCnt + FnGetRetval + FnSetRetval + FnXdpGetBuffLen + FnXdpLoadBytes + FnXdpStoreBytes + FnCopyFromUserTask + FnSkbSetTstamp + FnImaFileHash + FnKptrXchg + FnMapLookupPercpuElem + FnSkcToMptcpSock + FnDynptrFromMem + FnRingbufReserveDynptr + FnRingbufSubmitDynptr + FnRingbufDiscardDynptr + FnDynptrRead + FnDynptrWrite + FnDynptrData + + maxBuiltinFunc ) // Call emits a function call. diff --git a/agent/vendor/github.com/cilium/ebpf/asm/func_string.go b/agent/vendor/github.com/cilium/ebpf/asm/func_string.go index a712c5da8af..b7431b7f605 100644 --- a/agent/vendor/github.com/cilium/ebpf/asm/func_string.go +++ b/agent/vendor/github.com/cilium/ebpf/asm/func_string.go @@ -171,11 +171,53 @@ func _() { _ = x[FnKtimeGetCoarseNs-160] _ = x[FnImaInodeHash-161] _ = x[FnSockFromFile-162] + _ = x[FnCheckMtu-163] + _ = x[FnForEachMapElem-164] + _ = x[FnSnprintf-165] + _ = x[FnSysBpf-166] + _ = x[FnBtfFindByNameKind-167] + _ = x[FnSysClose-168] + _ = x[FnTimerInit-169] + _ = x[FnTimerSetCallback-170] + _ = x[FnTimerStart-171] + _ = x[FnTimerCancel-172] + _ = x[FnGetFuncIp-173] + _ = x[FnGetAttachCookie-174] + _ = x[FnTaskPtRegs-175] + _ = x[FnGetBranchSnapshot-176] + _ = x[FnTraceVprintk-177] + _ = x[FnSkcToUnixSock-178] + _ = x[FnKallsymsLookupName-179] + _ = x[FnFindVma-180] + _ = x[FnLoop-181] + _ = x[FnStrncmp-182] + _ = x[FnGetFuncArg-183] + _ = x[FnGetFuncRet-184] + _ = x[FnGetFuncArgCnt-185] + _ = x[FnGetRetval-186] + _ = x[FnSetRetval-187] + _ = x[FnXdpGetBuffLen-188] + _ = x[FnXdpLoadBytes-189] + _ = x[FnXdpStoreBytes-190] + _ = x[FnCopyFromUserTask-191] + _ = x[FnSkbSetTstamp-192] + _ = x[FnImaFileHash-193] + _ = x[FnKptrXchg-194] + _ = x[FnMapLookupPercpuElem-195] + _ = x[FnSkcToMptcpSock-196] + _ = x[FnDynptrFromMem-197] + _ = x[FnRingbufReserveDynptr-198] + _ = x[FnRingbufSubmitDynptr-199] + _ = x[FnRingbufDiscardDynptr-200] + _ = x[FnDynptrRead-201] + _ = x[FnDynptrWrite-202] + _ = x[FnDynptrData-203] + _ = x[maxBuiltinFunc-204] } -const _BuiltinFunc_name = 
"FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFile" +const _BuiltinFunc_name = 
"FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysCloseFnTimerInitFnTimerSetCallbackFnTimerStartFnTimerCancelFnGetFuncIpFnGetAttachCookieFnTaskPtRegsFnGetBranchSnapshotFnTraceVprintkFnSkcToUnixSockFnKallsymsLookupNameFnFindVmaFnLoopFnStrncmpFnGetFuncArgFnGetFuncRetFnGetFuncArgCntFnGetRetvalFnSetRetvalFnXdpGetBuffLenFnXdpLoadBytesFnXdpStoreBytesFnCopyFromUserTaskFnSkbSetTstampFnImaFileHashFnKptrXchgFnMapLookupPercpuElemFnSkcToMptcpSockFnDynptrFromMemFnRingbufReserveDynptrFnRingbufSubmitDynptrFnRingbufDiscardDynptrFnDynptrReadFnDynptrWriteFnDynptrDatamaxBuiltinFunc" -var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 
1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424} +var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497, 2508, 2526, 2538, 2551, 2562, 2579, 2591, 2610, 2624, 2639, 2659, 2668, 2674, 2683, 2695, 2707, 2722, 2733, 2744, 2759, 2773, 2788, 2806, 2820, 2833, 2843, 2864, 2880, 2895, 2917, 2938, 2960, 2972, 2985, 2997, 3011} func (i BuiltinFunc) String() string { if i < 0 || i >= BuiltinFunc(len(_BuiltinFunc_index)-1) { diff --git a/agent/vendor/github.com/cilium/ebpf/asm/instruction.go b/agent/vendor/github.com/cilium/ebpf/asm/instruction.go index e7ac0109e2d..f17d88b5186 100644 --- a/agent/vendor/github.com/cilium/ebpf/asm/instruction.go +++ b/agent/vendor/github.com/cilium/ebpf/asm/instruction.go @@ -8,8 +8,10 @@ import ( "fmt" "io" "math" + "sort" "strings" + "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) @@ -19,6 +21,10 @@ const InstructionSize = 8 // RawInstructionOffset is an offset in units of raw BPF instructions. type RawInstructionOffset uint64 +var ErrUnreferencedSymbol = errors.New("unreferenced symbol") +var ErrUnsatisfiedMapReference = errors.New("unsatisfied map reference") +var ErrUnsatisfiedProgramReference = errors.New("unsatisfied program reference") + // Bytes returns the offset of an instruction in bytes. func (rio RawInstructionOffset) Bytes() uint64 { return uint64(rio) * InstructionSize @@ -26,50 +32,57 @@ func (rio RawInstructionOffset) Bytes() uint64 { // Instruction is a single eBPF instruction. type Instruction struct { - OpCode OpCode - Dst Register - Src Register - Offset int16 - Constant int64 - Reference string - Symbol string -} - -// Sym creates a symbol. -func (ins Instruction) Sym(name string) Instruction { - ins.Symbol = name - return ins + OpCode OpCode + Dst Register + Src Register + Offset int16 + Constant int64 + + // Metadata contains optional metadata about this instruction. + Metadata Metadata } // Unmarshal decodes a BPF instruction. 
func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, error) { - var bi bpfInstruction - err := binary.Read(r, bo, &bi) - if err != nil { + data := make([]byte, InstructionSize) + if _, err := io.ReadFull(r, data); err != nil { return 0, err } - ins.OpCode = bi.OpCode - ins.Offset = bi.Offset - ins.Constant = int64(bi.Constant) - ins.Dst, ins.Src, err = bi.Registers.Unmarshal(bo) - if err != nil { - return 0, fmt.Errorf("can't unmarshal registers: %s", err) + ins.OpCode = OpCode(data[0]) + + regs := data[1] + switch bo { + case binary.LittleEndian: + ins.Dst, ins.Src = Register(regs&0xF), Register(regs>>4) + case binary.BigEndian: + ins.Dst, ins.Src = Register(regs>>4), Register(regs&0xf) } - if !bi.OpCode.IsDWordLoad() { + ins.Offset = int16(bo.Uint16(data[2:4])) + // Convert to int32 before widening to int64 + // to ensure the signed bit is carried over. + ins.Constant = int64(int32(bo.Uint32(data[4:8]))) + + if !ins.OpCode.IsDWordLoad() { return InstructionSize, nil } - var bi2 bpfInstruction - if err := binary.Read(r, bo, &bi2); err != nil { + // Pull another instruction from the stream to retrieve the second + // half of the 64-bit immediate value. + if _, err := io.ReadFull(r, data); err != nil { // No Wrap, to avoid io.EOF clash return 0, errors.New("64bit immediate is missing second half") } - if bi2.OpCode != 0 || bi2.Offset != 0 || bi2.Registers != 0 { + + // Require that all fields other than the value are zero. + if bo.Uint32(data[0:4]) != 0 { return 0, errors.New("64bit immediate has non-zero fields") } - ins.Constant = int64(uint64(uint32(bi2.Constant))<<32 | uint64(uint32(bi.Constant))) + + cons1 := uint32(ins.Constant) + cons2 := int32(bo.Uint32(data[4:8])) + ins.Constant = int64(cons2)<<32 | int64(cons1) return 2 * InstructionSize, nil } @@ -93,14 +106,12 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) return 0, fmt.Errorf("can't marshal registers: %s", err) } - bpfi := bpfInstruction{ - ins.OpCode, - regs, - ins.Offset, - cons, - } - - if err := binary.Write(w, bo, &bpfi); err != nil { + data := make([]byte, InstructionSize) + data[0] = byte(ins.OpCode) + data[1] = byte(regs) + bo.PutUint16(data[2:4], uint16(ins.Offset)) + bo.PutUint32(data[4:8], uint32(cons)) + if _, err := w.Write(data); err != nil { return 0, err } @@ -108,42 +119,76 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) return InstructionSize, nil } - bpfi = bpfInstruction{ - Constant: int32(ins.Constant >> 32), - } - - if err := binary.Write(w, bo, &bpfi); err != nil { + // The first half of the second part of a double-wide instruction + // must be zero. The second half carries the value. + bo.PutUint32(data[0:4], 0) + bo.PutUint32(data[4:8], uint32(ins.Constant>>32)) + if _, err := w.Write(data); err != nil { return 0, err } return 2 * InstructionSize, nil } +// AssociateMap associates a Map with this Instruction. +// +// Implicitly clears the Instruction's Reference field. +// +// Returns an error if the Instruction is not a map load. +func (ins *Instruction) AssociateMap(m FDer) error { + if !ins.IsLoadFromMap() { + return errors.New("not a load from a map") + } + + ins.Metadata.Set(referenceMeta{}, nil) + ins.Metadata.Set(mapMeta{}, m) + + return nil +} + // RewriteMapPtr changes an instruction to use a new map fd. // // Returns an error if the instruction doesn't load a map. +// +// Deprecated: use AssociateMap instead. If you cannot provide a Map, +// wrap an fd in a type implementing FDer. 
func (ins *Instruction) RewriteMapPtr(fd int) error { - if !ins.OpCode.IsDWordLoad() { - return fmt.Errorf("%s is not a 64 bit load", ins.OpCode) - } - - if ins.Src != PseudoMapFD && ins.Src != PseudoMapValue { + if !ins.IsLoadFromMap() { return errors.New("not a load from a map") } + ins.encodeMapFD(fd) + + return nil +} + +func (ins *Instruction) encodeMapFD(fd int) { // Preserve the offset value for direct map loads. offset := uint64(ins.Constant) & (math.MaxUint32 << 32) rawFd := uint64(uint32(fd)) ins.Constant = int64(offset | rawFd) - return nil } // MapPtr returns the map fd for this instruction. // // The result is undefined if the instruction is not a load from a map, // see IsLoadFromMap. +// +// Deprecated: use Map() instead. func (ins *Instruction) MapPtr() int { - return int(int32(uint64(ins.Constant) & math.MaxUint32)) + // If there is a map associated with the instruction, return its FD. + if fd := ins.Metadata.Get(mapMeta{}); fd != nil { + return fd.(FDer).FD() + } + + // Fall back to the fd stored in the Constant field + return ins.mapFd() +} + +// mapFd returns the map file descriptor stored in the 32 least significant +// bits of ins' Constant field. +func (ins *Instruction) mapFd() int { + return int(int32(ins.Constant)) } // RewriteMapOffset changes the offset of a direct load from a map. @@ -181,6 +226,23 @@ func (ins *Instruction) IsFunctionCall() bool { return ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall } +// IsLoadOfFunctionPointer returns true if the instruction loads a function pointer. +func (ins *Instruction) IsLoadOfFunctionPointer() bool { + return ins.OpCode.IsDWordLoad() && ins.Src == PseudoFunc +} + +// IsFunctionReference returns true if the instruction references another BPF +// function, either by invoking a Call jump operation or by loading a function +// pointer. +func (ins *Instruction) IsFunctionReference() bool { + return ins.IsFunctionCall() || ins.IsLoadOfFunctionPointer() +} + +// IsBuiltinCall returns true if the instruction is a built-in call, i.e. BPF helper call. +func (ins *Instruction) IsBuiltinCall() bool { + return ins.OpCode.JumpOp() == Call && ins.Src == R0 && ins.Dst == R0 +} + // IsConstantLoad returns true if the instruction loads a constant of the // given size. 
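// Illustrative sketch, not part of the vendored change: the new predicates
// added above make it easy to tell BPF helper calls apart from bpf-to-bpf
// function references. Uses only identifiers from the asm package in this patch;
// the "parse" symbol is hypothetical.
package main

import (
    "fmt"

    "github.com/cilium/ebpf/asm"
)

func main() {
    helper := asm.FnMapLookupElem.Call() // call to a BPF helper function
    subprog := asm.Call.Label("parse")   // call to another BPF function

    fmt.Println(helper.IsBuiltinCall())        // true: Call with Src and Dst both R0
    fmt.Println(subprog.IsFunctionReference()) // true: resolved against a matching Symbol at marshal time
}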
func (ins *Instruction) IsConstantLoad(size Size) bool { @@ -208,21 +270,30 @@ func (ins Instruction) Format(f fmt.State, c rune) { } if ins.IsLoadFromMap() { - fd := ins.MapPtr() + fd := ins.mapFd() + m := ins.Map() switch ins.Src { case PseudoMapFD: - fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd) + if m != nil { + fmt.Fprintf(f, "LoadMapPtr dst: %s map: %s", ins.Dst, m) + } else { + fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd) + } case PseudoMapValue: - fmt.Fprintf(f, "LoadMapValue dst: %s, fd: %d off: %d", ins.Dst, fd, ins.mapOffset()) + if m != nil { + fmt.Fprintf(f, "LoadMapValue dst: %s, map: %s off: %d", ins.Dst, m, ins.mapOffset()) + } else { + fmt.Fprintf(f, "LoadMapValue dst: %s, fd: %d off: %d", ins.Dst, fd, ins.mapOffset()) + } } goto ref } fmt.Fprintf(f, "%v ", op) - switch cls := op.Class(); cls { - case LdClass, LdXClass, StClass, StXClass: + switch cls := op.Class(); { + case cls.isLoadOrStore(): switch op.Mode() { case ImmMode: fmt.Fprintf(f, "dst: %s imm: %d", ins.Dst, ins.Constant) @@ -236,7 +307,7 @@ func (ins Instruction) Format(f fmt.State, c rune) { fmt.Fprintf(f, "dst: %s src: %s", ins.Dst, ins.Src) } - case ALU64Class, ALUClass: + case cls.IsALU(): fmt.Fprintf(f, "dst: %s ", ins.Dst) if op.ALUOp() == Swap || op.Source() == ImmSource { fmt.Fprintf(f, "imm: %d", ins.Constant) @@ -244,7 +315,7 @@ func (ins Instruction) Format(f fmt.State, c rune) { fmt.Fprintf(f, "src: %s", ins.Src) } - case JumpClass: + case cls.IsJump(): switch jop := op.JumpOp(); jop { case Call: if ins.Src == PseudoCall { @@ -265,42 +336,212 @@ func (ins Instruction) Format(f fmt.State, c rune) { } ref: - if ins.Reference != "" { - fmt.Fprintf(f, " <%s>", ins.Reference) + if ins.Reference() != "" { + fmt.Fprintf(f, " <%s>", ins.Reference()) } } +func (ins Instruction) equal(other Instruction) bool { + return ins.OpCode == other.OpCode && + ins.Dst == other.Dst && + ins.Src == other.Src && + ins.Offset == other.Offset && + ins.Constant == other.Constant +} + +// Size returns the amount of bytes ins would occupy in binary form. +func (ins Instruction) Size() uint64 { + return uint64(InstructionSize * ins.OpCode.rawInstructions()) +} + +type symbolMeta struct{} + +// WithSymbol marks the Instruction as a Symbol, which other Instructions +// can point to using corresponding calls to WithReference. +func (ins Instruction) WithSymbol(name string) Instruction { + ins.Metadata.Set(symbolMeta{}, name) + return ins +} + +// Sym creates a symbol. +// +// Deprecated: use WithSymbol instead. +func (ins Instruction) Sym(name string) Instruction { + return ins.WithSymbol(name) +} + +// Symbol returns the value ins has been marked with using WithSymbol, +// otherwise returns an empty string. A symbol is often an Instruction +// at the start of a function body. +func (ins Instruction) Symbol() string { + sym, _ := ins.Metadata.Get(symbolMeta{}).(string) + return sym +} + +type referenceMeta struct{} + +// WithReference makes ins reference another Symbol or map by name. +func (ins Instruction) WithReference(ref string) Instruction { + ins.Metadata.Set(referenceMeta{}, ref) + return ins +} + +// Reference returns the Symbol or map name referenced by ins, if any. +func (ins Instruction) Reference() string { + ref, _ := ins.Metadata.Get(referenceMeta{}).(string) + return ref +} + +type mapMeta struct{} + +// Map returns the Map referenced by ins, if any. +// An Instruction will contain a Map if e.g. it references an existing, +// pinned map that was opened during ELF loading. 
+func (ins Instruction) Map() FDer { + fd, _ := ins.Metadata.Get(mapMeta{}).(FDer) + return fd +} + +type sourceMeta struct{} + +// WithSource adds source information about the Instruction. +func (ins Instruction) WithSource(src fmt.Stringer) Instruction { + ins.Metadata.Set(sourceMeta{}, src) + return ins +} + +// Source returns source information about the Instruction. The field is +// present when the compiler emits BTF line info about the Instruction and +// usually contains the line of source code responsible for it. +func (ins Instruction) Source() fmt.Stringer { + str, _ := ins.Metadata.Get(sourceMeta{}).(fmt.Stringer) + return str +} + +// A Comment can be passed to Instruction.WithSource to add a comment +// to an instruction. +type Comment string + +func (s Comment) String() string { + return string(s) +} + +// FDer represents a resource tied to an underlying file descriptor. +// Used as a stand-in for e.g. ebpf.Map since that type cannot be +// imported here and FD() is the only method we rely on. +type FDer interface { + FD() int +} + // Instructions is an eBPF program. type Instructions []Instruction +// Unmarshal unmarshals an Instructions from a binary instruction stream. +// All instructions in insns are replaced by instructions decoded from r. +func (insns *Instructions) Unmarshal(r io.Reader, bo binary.ByteOrder) error { + if len(*insns) > 0 { + *insns = nil + } + + var offset uint64 + for { + var ins Instruction + n, err := ins.Unmarshal(r, bo) + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return fmt.Errorf("offset %d: %w", offset, err) + } + + *insns = append(*insns, ins) + offset += n + } + + return nil +} + +// Name returns the name of the function insns belongs to, if any. +func (insns Instructions) Name() string { + if len(insns) == 0 { + return "" + } + return insns[0].Symbol() +} + func (insns Instructions) String() string { return fmt.Sprint(insns) } +// Size returns the amount of bytes insns would occupy in binary form. +func (insns Instructions) Size() uint64 { + var sum uint64 + for _, ins := range insns { + sum += ins.Size() + } + return sum +} + +// AssociateMap updates all Instructions that Reference the given symbol +// to point to an existing Map m instead. +// +// Returns ErrUnreferencedSymbol error if no references to symbol are found +// in insns. If symbol is anything else than the symbol name of map (e.g. +// a bpf2bpf subprogram), an error is returned. +func (insns Instructions) AssociateMap(symbol string, m FDer) error { + if symbol == "" { + return errors.New("empty symbol") + } + + var found bool + for i := range insns { + ins := &insns[i] + if ins.Reference() != symbol { + continue + } + + if err := ins.AssociateMap(m); err != nil { + return err + } + + found = true + } + + if !found { + return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol) + } + + return nil +} + // RewriteMapPtr rewrites all loads of a specific map pointer to a new fd. // -// Returns an error if the symbol isn't used, see IsUnreferencedSymbol. +// Returns ErrUnreferencedSymbol if the symbol isn't used. +// +// Deprecated: use AssociateMap instead. 
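// A minimal sketch under stated assumptions (not part of the vendored code):
// any value with an FD() int method satisfies FDer, so an already-open map
// file descriptor can be associated with instructions without importing
// ebpf.Map. rawMapFD, the symbol "events" and the fd value 3 are hypothetical.
package main

import (
    "fmt"

    "github.com/cilium/ebpf/asm"
)

// rawMapFD wraps a raw map file descriptor to satisfy asm.FDer.
type rawMapFD int

func (m rawMapFD) FD() int { return int(m) }

func main() {
    insns := asm.Instructions{
        // A map load that still carries its symbol name as a Reference,
        // the way the ELF loader emits it before relocation.
        asm.LoadMapPtr(asm.R1, 0).WithReference("events"),
        asm.Return(),
    }

    // Resolve every reference to "events" to our file descriptor.
    if err := insns.AssociateMap("events", rawMapFD(3)); err != nil {
        fmt.Println(err) // ErrUnreferencedSymbol if no instruction references "events"
    }
}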
func (insns Instructions) RewriteMapPtr(symbol string, fd int) error { if symbol == "" { return errors.New("empty symbol") } - found := false + var found bool for i := range insns { ins := &insns[i] - if ins.Reference != symbol { + if ins.Reference() != symbol { continue } - if err := ins.RewriteMapPtr(fd); err != nil { - return err + if !ins.IsLoadFromMap() { + return errors.New("not a load from a map") } + ins.encodeMapFD(fd) + found = true } if !found { - return &unreferencedSymbolError{symbol} + return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol) } return nil @@ -312,31 +553,61 @@ func (insns Instructions) SymbolOffsets() (map[string]int, error) { offsets := make(map[string]int) for i, ins := range insns { - if ins.Symbol == "" { + if ins.Symbol() == "" { continue } - if _, ok := offsets[ins.Symbol]; ok { - return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol) + if _, ok := offsets[ins.Symbol()]; ok { + return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol()) } - offsets[ins.Symbol] = i + offsets[ins.Symbol()] = i } return offsets, nil } +// FunctionReferences returns a set of symbol names these Instructions make +// bpf-to-bpf calls to. +func (insns Instructions) FunctionReferences() []string { + calls := make(map[string]struct{}) + for _, ins := range insns { + if ins.Constant != -1 { + // BPF-to-BPF calls have -1 constants. + continue + } + + if ins.Reference() == "" { + continue + } + + if !ins.IsFunctionReference() { + continue + } + + calls[ins.Reference()] = struct{}{} + } + + result := make([]string, 0, len(calls)) + for call := range calls { + result = append(result, call) + } + + sort.Strings(result) + return result +} + // ReferenceOffsets returns the set of references and their offset in // the instructions. func (insns Instructions) ReferenceOffsets() map[string][]int { offsets := make(map[string][]int) for i, ins := range insns { - if ins.Reference == "" { + if ins.Reference() == "" { continue } - offsets[ins.Reference] = append(offsets[ins.Reference], i) + offsets[ins.Reference()] = append(offsets[ins.Reference()], i) } return offsets @@ -387,18 +658,36 @@ func (insns Instructions) Format(f fmt.State, c rune) { iter := insns.Iterate() for iter.Next() { - if iter.Ins.Symbol != "" { - fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol) + if iter.Ins.Symbol() != "" { + fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol()) + } + if src := iter.Ins.Source(); src != nil { + line := strings.TrimSpace(src.String()) + if line != "" { + fmt.Fprintf(f, "%s%*s; %s\n", indent, offsetWidth, " ", line) + } } fmt.Fprintf(f, "%s%*d: %v\n", indent, offsetWidth, iter.Offset, iter.Ins) } } // Marshal encodes a BPF program into the kernel format. +// +// insns may be modified if there are unresolved jumps or bpf2bpf calls. +// +// Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction +// without a matching Symbol Instruction within insns. 
func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error { + if err := insns.encodeFunctionReferences(); err != nil { + return err + } + + if err := insns.encodeMapPointers(); err != nil { + return err + } + for i, ins := range insns { - _, err := ins.Marshal(w, bo) - if err != nil { + if _, err := ins.Marshal(w, bo); err != nil { return fmt.Errorf("instruction %d: %w", i, err) } } @@ -424,6 +713,95 @@ func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) { return hex.EncodeToString(h.Sum(nil)[:unix.BPF_TAG_SIZE]), nil } +// encodeFunctionReferences populates the Offset (or Constant, depending on +// the instruction type) field of instructions with a Reference field to point +// to the offset of the corresponding instruction with a matching Symbol field. +// +// Only Reference Instructions that are either jumps or BPF function references +// (calls or function pointer loads) are populated. +// +// Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction +// without at least one corresponding Symbol Instruction within insns. +func (insns Instructions) encodeFunctionReferences() error { + // Index the offsets of instructions tagged as a symbol. + symbolOffsets := make(map[string]RawInstructionOffset) + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + + if ins.Symbol() == "" { + continue + } + + if _, ok := symbolOffsets[ins.Symbol()]; ok { + return fmt.Errorf("duplicate symbol %s", ins.Symbol()) + } + + symbolOffsets[ins.Symbol()] = iter.Offset + } + + // Find all instructions tagged as references to other symbols. + // Depending on the instruction type, populate their constant or offset + // fields to point to the symbol they refer to within the insn stream. + iter = insns.Iterate() + for iter.Next() { + i := iter.Index + offset := iter.Offset + ins := iter.Ins + + if ins.Reference() == "" { + continue + } + + switch { + case ins.IsFunctionReference() && ins.Constant == -1: + symOffset, ok := symbolOffsets[ins.Reference()] + if !ok { + return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference) + } + + ins.Constant = int64(symOffset - offset - 1) + + case ins.OpCode.Class().IsJump() && ins.Offset == -1: + symOffset, ok := symbolOffsets[ins.Reference()] + if !ok { + return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference) + } + + ins.Offset = int16(symOffset - offset - 1) + } + } + + return nil +} + +// encodeMapPointers finds all Map Instructions and encodes their FDs +// into their Constant fields. +func (insns Instructions) encodeMapPointers() error { + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + + if !ins.IsLoadFromMap() { + continue + } + + m := ins.Map() + if m == nil { + continue + } + + fd := m.FD() + if fd < 0 { + return fmt.Errorf("map %s: %w", m, sys.ErrClosedFd) + } + + ins.encodeMapFD(m.FD()) + } + + return nil +} + // Iterate allows iterating a BPF program while keeping track of // various offsets. 
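// Sketch only (assumes the asm package as vendored here): Marshal now resolves
// References against Symbols before encoding, so a bpf-to-bpf call can be
// written with Call.Label and a WithSymbol-tagged target. "double" is a
// hypothetical function name.
package main

import (
    "bytes"
    "encoding/binary"
    "fmt"

    "github.com/cilium/ebpf/asm"
)

func main() {
    insns := asm.Instructions{
        // Caller: invoke the function tagged "double" below, then exit.
        asm.Call.Label("double"),
        asm.Return(),

        // Callee: R0 = R1 + R1.
        asm.Mov.Reg(asm.R0, asm.R1).WithSymbol("double"),
        asm.Add.Reg(asm.R0, asm.R1),
        asm.Return(),
    }

    var buf bytes.Buffer
    if err := insns.Marshal(&buf, binary.LittleEndian); err != nil {
        // ErrUnsatisfiedProgramReference would be returned if "double" had no Symbol.
        fmt.Println(err)
        return
    }
    fmt.Println(buf.Len(), "bytes of encoded instructions")
}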
// @@ -459,13 +837,6 @@ func (iter *InstructionIterator) Next() bool { return true } -type bpfInstruction struct { - OpCode OpCode - Registers bpfRegisters - Offset int16 - Constant int32 -} - type bpfRegisters uint8 func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, error) { @@ -479,28 +850,10 @@ func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, erro } } -func (r bpfRegisters) Unmarshal(bo binary.ByteOrder) (dst, src Register, err error) { - switch bo { - case binary.LittleEndian: - return Register(r & 0xF), Register(r >> 4), nil - case binary.BigEndian: - return Register(r >> 4), Register(r & 0xf), nil - default: - return 0, 0, fmt.Errorf("unrecognized ByteOrder %T", bo) - } -} - -type unreferencedSymbolError struct { - symbol string -} - -func (use *unreferencedSymbolError) Error() string { - return fmt.Sprintf("unreferenced symbol %s", use.symbol) -} - // IsUnreferencedSymbol returns true if err was caused by // an unreferenced symbol. +// +// Deprecated: use errors.Is(err, asm.ErrUnreferencedSymbol). func IsUnreferencedSymbol(err error) bool { - _, ok := err.(*unreferencedSymbolError) - return ok + return errors.Is(err, ErrUnreferencedSymbol) } diff --git a/agent/vendor/github.com/cilium/ebpf/asm/jump.go b/agent/vendor/github.com/cilium/ebpf/asm/jump.go index 7757179de64..e31e42cac52 100644 --- a/agent/vendor/github.com/cilium/ebpf/asm/jump.go +++ b/agent/vendor/github.com/cilium/ebpf/asm/jump.go @@ -60,50 +60,68 @@ func (op JumpOp) Op(source Source) OpCode { return OpCode(JumpClass).SetJumpOp(op).SetSource(source) } -// Imm compares dst to value, and adjusts PC by offset if the condition is fulfilled. +// Imm compares 64 bit dst to 64 bit value (sign extended), and adjusts PC by offset if the condition is fulfilled. func (op JumpOp) Imm(dst Register, value int32, label string) Instruction { - if op == Exit || op == Call || op == Ja { - return Instruction{OpCode: InvalidOpCode} - } + return Instruction{ + OpCode: op.opCode(JumpClass, ImmSource), + Dst: dst, + Offset: -1, + Constant: int64(value), + }.WithReference(label) +} +// Imm32 compares 32 bit dst to 32 bit value, and adjusts PC by offset if the condition is fulfilled. +// Requires kernel 5.1. +func (op JumpOp) Imm32(dst Register, value int32, label string) Instruction { return Instruction{ - OpCode: OpCode(JumpClass).SetJumpOp(op).SetSource(ImmSource), - Dst: dst, - Offset: -1, - Constant: int64(value), - Reference: label, - } + OpCode: op.opCode(Jump32Class, ImmSource), + Dst: dst, + Offset: -1, + Constant: int64(value), + }.WithReference(label) } -// Reg compares dst to src, and adjusts PC by offset if the condition is fulfilled. +// Reg compares 64 bit dst to 64 bit src, and adjusts PC by offset if the condition is fulfilled. func (op JumpOp) Reg(dst, src Register, label string) Instruction { - if op == Exit || op == Call || op == Ja { - return Instruction{OpCode: InvalidOpCode} - } + return Instruction{ + OpCode: op.opCode(JumpClass, RegSource), + Dst: dst, + Src: src, + Offset: -1, + }.WithReference(label) +} +// Reg32 compares 32 bit dst to 32 bit src, and adjusts PC by offset if the condition is fulfilled. +// Requires kernel 5.1. 
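// Sketch, not part of the change: the 32-bit jump variants added above emit
// Jump32Class opcodes that compare only the lower 32 bits (kernel 5.1+, per
// the comments). The "exit" label is hypothetical.
package main

import (
    "fmt"

    "github.com/cilium/ebpf/asm"
)

func main() {
    j64 := asm.JEq.Imm(asm.R1, 0, "exit")   // 64-bit comparison, JumpClass
    j32 := asm.JEq.Imm32(asm.R1, 0, "exit") // 32-bit comparison, Jump32Class

    fmt.Println(j64.OpCode.Class(), j32.OpCode.Class()) // JumpClass Jump32Class
}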
+func (op JumpOp) Reg32(dst, src Register, label string) Instruction { return Instruction{ - OpCode: OpCode(JumpClass).SetJumpOp(op).SetSource(RegSource), - Dst: dst, - Src: src, - Offset: -1, - Reference: label, + OpCode: op.opCode(Jump32Class, RegSource), + Dst: dst, + Src: src, + Offset: -1, + }.WithReference(label) +} + +func (op JumpOp) opCode(class Class, source Source) OpCode { + if op == Exit || op == Call || op == Ja { + return InvalidOpCode } + + return OpCode(class).SetJumpOp(op).SetSource(source) } // Label adjusts PC to the address of the label. func (op JumpOp) Label(label string) Instruction { if op == Call { return Instruction{ - OpCode: OpCode(JumpClass).SetJumpOp(Call), - Src: PseudoCall, - Constant: -1, - Reference: label, - } + OpCode: OpCode(JumpClass).SetJumpOp(Call), + Src: PseudoCall, + Constant: -1, + }.WithReference(label) } return Instruction{ - OpCode: OpCode(JumpClass).SetJumpOp(op), - Offset: -1, - Reference: label, - } + OpCode: OpCode(JumpClass).SetJumpOp(op), + Offset: -1, + }.WithReference(label) } diff --git a/agent/vendor/github.com/cilium/ebpf/asm/metadata.go b/agent/vendor/github.com/cilium/ebpf/asm/metadata.go new file mode 100644 index 00000000000..dd368a93603 --- /dev/null +++ b/agent/vendor/github.com/cilium/ebpf/asm/metadata.go @@ -0,0 +1,80 @@ +package asm + +// Metadata contains metadata about an instruction. +type Metadata struct { + head *metaElement +} + +type metaElement struct { + next *metaElement + key, value interface{} +} + +// Find the element containing key. +// +// Returns nil if there is no such element. +func (m *Metadata) find(key interface{}) *metaElement { + for e := m.head; e != nil; e = e.next { + if e.key == key { + return e + } + } + return nil +} + +// Remove an element from the linked list. +// +// Copies as many elements of the list as necessary to remove r, but doesn't +// perform a full copy. +func (m *Metadata) remove(r *metaElement) { + current := &m.head + for e := m.head; e != nil; e = e.next { + if e == r { + // We've found the element we want to remove. + *current = e.next + + // No need to copy the tail. + return + } + + // There is another element in front of the one we want to remove. + // We have to copy it to be able to change metaElement.next. + cpy := &metaElement{key: e.key, value: e.value} + *current = cpy + current = &cpy.next + } +} + +// Set a key to a value. +// +// If value is nil, the key is removed. Avoids modifying old metadata by +// copying if necessary. +func (m *Metadata) Set(key, value interface{}) { + if e := m.find(key); e != nil { + if e.value == value { + // Key is present and the value is the same. Nothing to do. + return + } + + // Key is present with a different value. Create a copy of the list + // which doesn't have the element in it. + m.remove(e) + } + + // m.head is now a linked list that doesn't contain key. + if value == nil { + return + } + + m.head = &metaElement{key: key, value: value, next: m.head} +} + +// Get the value of a key. +// +// Returns nil if no value with the given key is present. 
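// Sketch under stated assumptions: Metadata keys are typically unexported
// empty struct types, mirroring symbolMeta and referenceMeta above; noteMeta
// is hypothetical. Set copies list elements as needed, so copies of a value
// embedding Metadata do not share later mutations.
package main

import (
    "fmt"

    "github.com/cilium/ebpf/asm"
)

type noteMeta struct{}

func main() {
    var a asm.Metadata
    a.Set(noteMeta{}, "original")

    // Copy the value, then change the copy: the original is left untouched.
    b := a
    b.Set(noteMeta{}, "changed")

    fmt.Println(a.Get(noteMeta{}), b.Get(noteMeta{})) // original changed
}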
+func (m *Metadata) Get(key interface{}) interface{} { + if e := m.find(key); e != nil { + return e.value + } + return nil +} diff --git a/agent/vendor/github.com/cilium/ebpf/asm/opcode.go b/agent/vendor/github.com/cilium/ebpf/asm/opcode.go index 6edc3cf5917..b11917e18bb 100644 --- a/agent/vendor/github.com/cilium/ebpf/asm/opcode.go +++ b/agent/vendor/github.com/cilium/ebpf/asm/opcode.go @@ -7,14 +7,6 @@ import ( //go:generate stringer -output opcode_string.go -type=Class -type encoding int - -const ( - unknownEncoding encoding = iota - loadOrStore - jumpOrALU -) - // Class of operations // // msb lsb @@ -26,31 +18,52 @@ type Class uint8 const classMask OpCode = 0x07 const ( - // LdClass load memory + // LdClass loads immediate values into registers. + // Also used for non-standard load operations from cBPF. LdClass Class = 0x00 - // LdXClass load memory from constant + // LdXClass loads memory into registers. LdXClass Class = 0x01 - // StClass load register from memory + // StClass stores immediate values to memory. StClass Class = 0x02 - // StXClass load register from constant + // StXClass stores registers to memory. StXClass Class = 0x03 - // ALUClass arithmetic operators + // ALUClass describes arithmetic operators. ALUClass Class = 0x04 - // JumpClass jump operators + // JumpClass describes jump operators. JumpClass Class = 0x05 - // ALU64Class arithmetic in 64 bit mode + // Jump32Class describes jump operators with 32-bit comparisons. + // Requires kernel 5.1. + Jump32Class Class = 0x06 + // ALU64Class describes arithmetic operators in 64-bit mode. ALU64Class Class = 0x07 ) -func (cls Class) encoding() encoding { - switch cls { - case LdClass, LdXClass, StClass, StXClass: - return loadOrStore - case ALU64Class, ALUClass, JumpClass: - return jumpOrALU - default: - return unknownEncoding - } +// IsLoad checks if this is either LdClass or LdXClass. +func (cls Class) IsLoad() bool { + return cls == LdClass || cls == LdXClass +} + +// IsStore checks if this is either StClass or StXClass. +func (cls Class) IsStore() bool { + return cls == StClass || cls == StXClass +} + +func (cls Class) isLoadOrStore() bool { + return cls.IsLoad() || cls.IsStore() +} + +// IsALU checks if this is either ALUClass or ALU64Class. +func (cls Class) IsALU() bool { + return cls == ALUClass || cls == ALU64Class +} + +// IsJump checks if this is either JumpClass or Jump32Class. +func (cls Class) IsJump() bool { + return cls == JumpClass || cls == Jump32Class +} + +func (cls Class) isJumpOrALU() bool { + return cls.IsJump() || cls.IsALU() } // OpCode is a packed eBPF opcode. @@ -86,7 +99,7 @@ func (op OpCode) Class() Class { // Mode returns the mode for load and store operations. func (op OpCode) Mode() Mode { - if op.Class().encoding() != loadOrStore { + if !op.Class().isLoadOrStore() { return InvalidMode } return Mode(op & modeMask) @@ -94,7 +107,7 @@ func (op OpCode) Mode() Mode { // Size returns the size for load and store operations. func (op OpCode) Size() Size { - if op.Class().encoding() != loadOrStore { + if !op.Class().isLoadOrStore() { return InvalidSize } return Size(op & sizeMask) @@ -102,7 +115,7 @@ func (op OpCode) Size() Size { // Source returns the source for branch and ALU operations. func (op OpCode) Source() Source { - if op.Class().encoding() != jumpOrALU || op.ALUOp() == Swap { + if !op.Class().isJumpOrALU() || op.ALUOp() == Swap { return InvalidSource } return Source(op & sourceMask) @@ -110,7 +123,7 @@ func (op OpCode) Source() Source { // ALUOp returns the ALUOp. 
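// Sketch, illustrative only: the encoding-based switch has been replaced by
// predicate methods on Class, and Jump32Class is now recognised as a jump class.
package main

import (
    "fmt"

    "github.com/cilium/ebpf/asm"
)

func main() {
    fmt.Println(asm.Jump32Class.IsJump()) // true
    fmt.Println(asm.ALU64Class.IsALU())   // true
    fmt.Println(asm.StXClass.IsStore())   // true
    fmt.Println(asm.LdXClass.IsLoad())    // true
}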
func (op OpCode) ALUOp() ALUOp { - if op.Class().encoding() != jumpOrALU { + if !op.Class().IsALU() { return InvalidALUOp } return ALUOp(op & aluMask) @@ -125,18 +138,27 @@ func (op OpCode) Endianness() Endianness { } // JumpOp returns the JumpOp. +// Returns InvalidJumpOp if it doesn't encode a jump. func (op OpCode) JumpOp() JumpOp { - if op.Class().encoding() != jumpOrALU { + if !op.Class().IsJump() { return InvalidJumpOp } - return JumpOp(op & jumpMask) + + jumpOp := JumpOp(op & jumpMask) + + // Some JumpOps are only supported by JumpClass, not Jump32Class. + if op.Class() == Jump32Class && (jumpOp == Exit || jumpOp == Call || jumpOp == Ja) { + return InvalidJumpOp + } + + return jumpOp } // SetMode sets the mode on load and store operations. // // Returns InvalidOpCode if op is of the wrong class. func (op OpCode) SetMode(mode Mode) OpCode { - if op.Class().encoding() != loadOrStore || !valid(OpCode(mode), modeMask) { + if !op.Class().isLoadOrStore() || !valid(OpCode(mode), modeMask) { return InvalidOpCode } return (op & ^modeMask) | OpCode(mode) @@ -146,7 +168,7 @@ func (op OpCode) SetMode(mode Mode) OpCode { // // Returns InvalidOpCode if op is of the wrong class. func (op OpCode) SetSize(size Size) OpCode { - if op.Class().encoding() != loadOrStore || !valid(OpCode(size), sizeMask) { + if !op.Class().isLoadOrStore() || !valid(OpCode(size), sizeMask) { return InvalidOpCode } return (op & ^sizeMask) | OpCode(size) @@ -156,7 +178,7 @@ func (op OpCode) SetSize(size Size) OpCode { // // Returns InvalidOpCode if op is of the wrong class. func (op OpCode) SetSource(source Source) OpCode { - if op.Class().encoding() != jumpOrALU || !valid(OpCode(source), sourceMask) { + if !op.Class().isJumpOrALU() || !valid(OpCode(source), sourceMask) { return InvalidOpCode } return (op & ^sourceMask) | OpCode(source) @@ -166,8 +188,7 @@ func (op OpCode) SetSource(source Source) OpCode { // // Returns InvalidOpCode if op is of the wrong class. func (op OpCode) SetALUOp(alu ALUOp) OpCode { - class := op.Class() - if (class != ALUClass && class != ALU64Class) || !valid(OpCode(alu), aluMask) { + if !op.Class().IsALU() || !valid(OpCode(alu), aluMask) { return InvalidOpCode } return (op & ^aluMask) | OpCode(alu) @@ -177,17 +198,25 @@ func (op OpCode) SetALUOp(alu ALUOp) OpCode { // // Returns InvalidOpCode if op is of the wrong class. func (op OpCode) SetJumpOp(jump JumpOp) OpCode { - if op.Class() != JumpClass || !valid(OpCode(jump), jumpMask) { + if !op.Class().IsJump() || !valid(OpCode(jump), jumpMask) { + return InvalidOpCode + } + + newOp := (op & ^jumpMask) | OpCode(jump) + + // Check newOp is legal. 
+ if newOp.JumpOp() == InvalidJumpOp { return InvalidOpCode } - return (op & ^jumpMask) | OpCode(jump) + + return newOp } func (op OpCode) String() string { var f strings.Builder - switch class := op.Class(); class { - case LdClass, LdXClass, StClass, StXClass: + switch class := op.Class(); { + case class.isLoadOrStore(): f.WriteString(strings.TrimSuffix(class.String(), "Class")) mode := op.Mode() @@ -204,7 +233,7 @@ func (op OpCode) String() string { f.WriteString("B") } - case ALU64Class, ALUClass: + case class.IsALU(): f.WriteString(op.ALUOp().String()) if op.ALUOp() == Swap { @@ -218,8 +247,13 @@ func (op OpCode) String() string { f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) } - case JumpClass: + case class.IsJump(): f.WriteString(op.JumpOp().String()) + + if class == Jump32Class { + f.WriteString("32") + } + if jop := op.JumpOp(); jop != Exit && jop != Call { f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) } diff --git a/agent/vendor/github.com/cilium/ebpf/asm/opcode_string.go b/agent/vendor/github.com/cilium/ebpf/asm/opcode_string.go index 079ce1db0b8..58bc3e7e7f0 100644 --- a/agent/vendor/github.com/cilium/ebpf/asm/opcode_string.go +++ b/agent/vendor/github.com/cilium/ebpf/asm/opcode_string.go @@ -14,25 +14,17 @@ func _() { _ = x[StXClass-3] _ = x[ALUClass-4] _ = x[JumpClass-5] + _ = x[Jump32Class-6] _ = x[ALU64Class-7] } -const ( - _Class_name_0 = "LdClassLdXClassStClassStXClassALUClassJumpClass" - _Class_name_1 = "ALU64Class" -) +const _Class_name = "LdClassLdXClassStClassStXClassALUClassJumpClassJump32ClassALU64Class" -var ( - _Class_index_0 = [...]uint8{0, 7, 15, 22, 30, 38, 47} -) +var _Class_index = [...]uint8{0, 7, 15, 22, 30, 38, 47, 58, 68} func (i Class) String() string { - switch { - case 0 <= i && i <= 5: - return _Class_name_0[_Class_index_0[i]:_Class_index_0[i+1]] - case i == 7: - return _Class_name_1 - default: + if i >= Class(len(_Class_index)-1) { return "Class(" + strconv.FormatInt(int64(i), 10) + ")" } + return _Class_name[_Class_index[i]:_Class_index[i+1]] } diff --git a/agent/vendor/github.com/cilium/ebpf/asm/register.go b/agent/vendor/github.com/cilium/ebpf/asm/register.go index 76cb44bffc7..dd5d44f1c19 100644 --- a/agent/vendor/github.com/cilium/ebpf/asm/register.go +++ b/agent/vendor/github.com/cilium/ebpf/asm/register.go @@ -38,6 +38,7 @@ const ( PseudoMapFD = R1 // BPF_PSEUDO_MAP_FD PseudoMapValue = R2 // BPF_PSEUDO_MAP_VALUE PseudoCall = R1 // BPF_PSEUDO_CALL + PseudoFunc = R4 // BPF_PSEUDO_FUNC ) func (r Register) String() string { diff --git a/agent/vendor/github.com/cilium/ebpf/attachtype_string.go b/agent/vendor/github.com/cilium/ebpf/attachtype_string.go new file mode 100644 index 00000000000..de355ed9092 --- /dev/null +++ b/agent/vendor/github.com/cilium/ebpf/attachtype_string.go @@ -0,0 +1,65 @@ +// Code generated by "stringer -type AttachType -trimprefix Attach"; DO NOT EDIT. + +package ebpf + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[AttachNone-0] + _ = x[AttachCGroupInetIngress-0] + _ = x[AttachCGroupInetEgress-1] + _ = x[AttachCGroupInetSockCreate-2] + _ = x[AttachCGroupSockOps-3] + _ = x[AttachSkSKBStreamParser-4] + _ = x[AttachSkSKBStreamVerdict-5] + _ = x[AttachCGroupDevice-6] + _ = x[AttachSkMsgVerdict-7] + _ = x[AttachCGroupInet4Bind-8] + _ = x[AttachCGroupInet6Bind-9] + _ = x[AttachCGroupInet4Connect-10] + _ = x[AttachCGroupInet6Connect-11] + _ = x[AttachCGroupInet4PostBind-12] + _ = x[AttachCGroupInet6PostBind-13] + _ = x[AttachCGroupUDP4Sendmsg-14] + _ = x[AttachCGroupUDP6Sendmsg-15] + _ = x[AttachLircMode2-16] + _ = x[AttachFlowDissector-17] + _ = x[AttachCGroupSysctl-18] + _ = x[AttachCGroupUDP4Recvmsg-19] + _ = x[AttachCGroupUDP6Recvmsg-20] + _ = x[AttachCGroupGetsockopt-21] + _ = x[AttachCGroupSetsockopt-22] + _ = x[AttachTraceRawTp-23] + _ = x[AttachTraceFEntry-24] + _ = x[AttachTraceFExit-25] + _ = x[AttachModifyReturn-26] + _ = x[AttachLSMMac-27] + _ = x[AttachTraceIter-28] + _ = x[AttachCgroupInet4GetPeername-29] + _ = x[AttachCgroupInet6GetPeername-30] + _ = x[AttachCgroupInet4GetSockname-31] + _ = x[AttachCgroupInet6GetSockname-32] + _ = x[AttachXDPDevMap-33] + _ = x[AttachCgroupInetSockRelease-34] + _ = x[AttachXDPCPUMap-35] + _ = x[AttachSkLookup-36] + _ = x[AttachXDP-37] + _ = x[AttachSkSKBVerdict-38] + _ = x[AttachSkReuseportSelect-39] + _ = x[AttachSkReuseportSelectOrMigrate-40] + _ = x[AttachPerfEvent-41] +} + +const _AttachType_name = "NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEvent" + +var _AttachType_index = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610} + +func (i AttachType) String() string { + if i >= AttachType(len(_AttachType_index)-1) { + return "AttachType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _AttachType_name[_AttachType_index[i]:_AttachType_index[i+1]] +} diff --git a/agent/vendor/github.com/cilium/ebpf/btf/btf.go b/agent/vendor/github.com/cilium/ebpf/btf/btf.go new file mode 100644 index 00000000000..a5969332aaa --- /dev/null +++ b/agent/vendor/github.com/cilium/ebpf/btf/btf.go @@ -0,0 +1,897 @@ +package btf + +import ( + "bufio" + "bytes" + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "os" + "reflect" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +const btfMagic = 0xeB9F + +// Errors returned by BTF functions. +var ( + ErrNotSupported = internal.ErrNotSupported + ErrNotFound = errors.New("not found") + ErrNoExtendedInfo = errors.New("no extended info") +) + +// ID represents the unique ID of a BTF object. +type ID = sys.BTFID + +// Spec represents decoded BTF. +type Spec struct { + // Data from .BTF. + rawTypes []rawType + strings *stringTable + + // All types contained by the spec. 
For the base type, the position of + // a type in the slice is its ID. + types types + + // Type IDs indexed by type. + typeIDs map[Type]TypeID + + // Types indexed by essential name. + // Includes all struct flavors and types with the same name. + namedTypes map[essentialName][]Type + + byteOrder binary.ByteOrder +} + +type btfHeader struct { + Magic uint16 + Version uint8 + Flags uint8 + HdrLen uint32 + + TypeOff uint32 + TypeLen uint32 + StringOff uint32 + StringLen uint32 +} + +// typeStart returns the offset from the beginning of the .BTF section +// to the start of its type entries. +func (h *btfHeader) typeStart() int64 { + return int64(h.HdrLen + h.TypeOff) +} + +// stringStart returns the offset from the beginning of the .BTF section +// to the start of its string table. +func (h *btfHeader) stringStart() int64 { + return int64(h.HdrLen + h.StringOff) +} + +// LoadSpec opens file and calls LoadSpecFromReader on it. +func LoadSpec(file string) (*Spec, error) { + fh, err := os.Open(file) + if err != nil { + return nil, err + } + defer fh.Close() + + return LoadSpecFromReader(fh) +} + +// LoadSpecFromReader reads from an ELF or a raw BTF blob. +// +// Returns ErrNotFound if reading from an ELF which contains no BTF. ExtInfos +// may be nil. +func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) { + file, err := internal.NewSafeELFFile(rd) + if err != nil { + if bo := guessRawBTFByteOrder(rd); bo != nil { + // Try to parse a naked BTF blob. This will return an error if + // we encounter a Datasec, since we can't fix it up. + spec, err := loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil, nil) + return spec, err + } + + return nil, err + } + + return loadSpecFromELF(file) +} + +// LoadSpecAndExtInfosFromReader reads from an ELF. +// +// ExtInfos may be nil if the ELF doesn't contain section metadta. +// Returns ErrNotFound if the ELF contains no BTF. +func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) { + file, err := internal.NewSafeELFFile(rd) + if err != nil { + return nil, nil, err + } + + spec, err := loadSpecFromELF(file) + if err != nil { + return nil, nil, err + } + + extInfos, err := loadExtInfosFromELF(file, spec.types, spec.strings) + if err != nil && !errors.Is(err, ErrNotFound) { + return nil, nil, err + } + + return spec, extInfos, nil +} + +// variableOffsets extracts all symbols offsets from an ELF and indexes them by +// section and variable name. +// +// References to variables in BTF data sections carry unsigned 32-bit offsets. +// Some ELF symbols (e.g. in vmlinux) may point to virtual memory that is well +// beyond this range. Since these symbols cannot be described by BTF info, +// ignore them here. +func variableOffsets(file *internal.SafeELFFile) (map[variable]uint32, error) { + symbols, err := file.Symbols() + if err != nil { + return nil, fmt.Errorf("can't read symbols: %v", err) + } + + variableOffsets := make(map[variable]uint32) + for _, symbol := range symbols { + if idx := symbol.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE { + // Ignore things like SHN_ABS + continue + } + + if symbol.Value > math.MaxUint32 { + // VarSecinfo offset is u32, cannot reference symbols in higher regions. 
+ continue + } + + if int(symbol.Section) >= len(file.Sections) { + return nil, fmt.Errorf("symbol %s: invalid section %d", symbol.Name, symbol.Section) + } + + secName := file.Sections[symbol.Section].Name + variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value) + } + + return variableOffsets, nil +} + +func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) { + var ( + btfSection *elf.Section + sectionSizes = make(map[string]uint32) + ) + + for _, sec := range file.Sections { + switch sec.Name { + case ".BTF": + btfSection = sec + default: + if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS { + break + } + + if sec.Size > math.MaxUint32 { + return nil, fmt.Errorf("section %s exceeds maximum size", sec.Name) + } + + sectionSizes[sec.Name] = uint32(sec.Size) + } + } + + if btfSection == nil { + return nil, fmt.Errorf("btf: %w", ErrNotFound) + } + + vars, err := variableOffsets(file) + if err != nil { + return nil, err + } + + if btfSection.ReaderAt == nil { + return nil, fmt.Errorf("compressed BTF is not supported") + } + + rawTypes, rawStrings, err := parseBTF(btfSection.ReaderAt, file.ByteOrder, nil) + if err != nil { + return nil, err + } + + err = fixupDatasec(rawTypes, rawStrings, sectionSizes, vars) + if err != nil { + return nil, err + } + + return inflateSpec(rawTypes, rawStrings, file.ByteOrder, nil) +} + +func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder, + baseTypes types, baseStrings *stringTable) (*Spec, error) { + + rawTypes, rawStrings, err := parseBTF(btf, bo, baseStrings) + if err != nil { + return nil, err + } + + return inflateSpec(rawTypes, rawStrings, bo, baseTypes) +} + +func inflateSpec(rawTypes []rawType, rawStrings *stringTable, bo binary.ByteOrder, + baseTypes types) (*Spec, error) { + + types, err := inflateRawTypes(rawTypes, baseTypes, rawStrings) + if err != nil { + return nil, err + } + + typeIDs, typesByName := indexTypes(types, TypeID(len(baseTypes))) + + return &Spec{ + rawTypes: rawTypes, + namedTypes: typesByName, + typeIDs: typeIDs, + types: types, + strings: rawStrings, + byteOrder: bo, + }, nil +} + +func indexTypes(types []Type, typeIDOffset TypeID) (map[Type]TypeID, map[essentialName][]Type) { + namedTypes := 0 + for _, typ := range types { + if typ.TypeName() != "" { + // Do a pre-pass to figure out how big types by name has to be. + // Most types have unique names, so it's OK to ignore essentialName + // here. + namedTypes++ + } + } + + typeIDs := make(map[Type]TypeID, len(types)) + typesByName := make(map[essentialName][]Type, namedTypes) + + for i, typ := range types { + if name := newEssentialName(typ.TypeName()); name != "" { + typesByName[name] = append(typesByName[name], typ) + } + typeIDs[typ] = TypeID(i) + typeIDOffset + } + + return typeIDs, typesByName +} + +// LoadKernelSpec returns the current kernel's BTF information. +// +// Defaults to /sys/kernel/btf/vmlinux and falls back to scanning the file system +// for vmlinux ELFs. Returns an error wrapping ErrNotSupported if BTF is not enabled. +func LoadKernelSpec() (*Spec, error) { + fh, err := os.Open("/sys/kernel/btf/vmlinux") + if err == nil { + defer fh.Close() + + return loadRawSpec(fh, internal.NativeEndian, nil, nil) + } + + file, err := findVMLinux() + if err != nil { + return nil, err + } + defer file.Close() + + return loadSpecFromELF(file) +} + +// findVMLinux scans multiple well-known paths for vmlinux kernel images. 
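// Sketch (assumes a kernel that may or may not expose BTF): LoadKernelSpec
// prefers /sys/kernel/btf/vmlinux and only falls back to the on-disk vmlinux
// locations listed above, wrapping ErrNotSupported when neither is available.
package main

import (
    "errors"
    "fmt"

    "github.com/cilium/ebpf/btf"
)

func main() {
    spec, err := btf.LoadKernelSpec()
    if errors.Is(err, btf.ErrNotSupported) {
        fmt.Println("kernel built without BTF")
        return
    }
    if err != nil {
        fmt.Println(err)
        return
    }
    _ = spec // ready for type lookups, see TypeByName further down
}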
+func findVMLinux() (*internal.SafeELFFile, error) { + release, err := internal.KernelRelease() + if err != nil { + return nil, err + } + + // use same list of locations as libbpf + // https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122 + locations := []string{ + "/boot/vmlinux-%s", + "/lib/modules/%s/vmlinux-%[1]s", + "/lib/modules/%s/build/vmlinux", + "/usr/lib/modules/%s/kernel/vmlinux", + "/usr/lib/debug/boot/vmlinux-%s", + "/usr/lib/debug/boot/vmlinux-%s.debug", + "/usr/lib/debug/lib/modules/%s/vmlinux", + } + + for _, loc := range locations { + file, err := internal.OpenSafeELFFile(fmt.Sprintf(loc, release)) + if errors.Is(err, os.ErrNotExist) { + continue + } + return file, err + } + + return nil, fmt.Errorf("no BTF found for kernel version %s: %w", release, internal.ErrNotSupported) +} + +// parseBTFHeader parses the header of the .BTF section. +func parseBTFHeader(r io.Reader, bo binary.ByteOrder) (*btfHeader, error) { + var header btfHeader + if err := binary.Read(r, bo, &header); err != nil { + return nil, fmt.Errorf("can't read header: %v", err) + } + + if header.Magic != btfMagic { + return nil, fmt.Errorf("incorrect magic value %v", header.Magic) + } + + if header.Version != 1 { + return nil, fmt.Errorf("unexpected version %v", header.Version) + } + + if header.Flags != 0 { + return nil, fmt.Errorf("unsupported flags %v", header.Flags) + } + + remainder := int64(header.HdrLen) - int64(binary.Size(&header)) + if remainder < 0 { + return nil, errors.New("header length shorter than btfHeader size") + } + + if _, err := io.CopyN(internal.DiscardZeroes{}, r, remainder); err != nil { + return nil, fmt.Errorf("header padding: %v", err) + } + + return &header, nil +} + +func guessRawBTFByteOrder(r io.ReaderAt) binary.ByteOrder { + buf := new(bufio.Reader) + for _, bo := range []binary.ByteOrder{ + binary.LittleEndian, + binary.BigEndian, + } { + buf.Reset(io.NewSectionReader(r, 0, math.MaxInt64)) + if _, err := parseBTFHeader(buf, bo); err == nil { + return bo + } + } + + return nil +} + +// parseBTF reads a .BTF section into memory and parses it into a list of +// raw types and a string table. 
+func parseBTF(btf io.ReaderAt, bo binary.ByteOrder, baseStrings *stringTable) ([]rawType, *stringTable, error) { + buf := internal.NewBufferedSectionReader(btf, 0, math.MaxInt64) + header, err := parseBTFHeader(buf, bo) + if err != nil { + return nil, nil, fmt.Errorf("parsing .BTF header: %v", err) + } + + rawStrings, err := readStringTable(io.NewSectionReader(btf, header.stringStart(), int64(header.StringLen)), + baseStrings) + if err != nil { + return nil, nil, fmt.Errorf("can't read type names: %w", err) + } + + buf.Reset(io.NewSectionReader(btf, header.typeStart(), int64(header.TypeLen))) + rawTypes, err := readTypes(buf, bo, header.TypeLen) + if err != nil { + return nil, nil, fmt.Errorf("can't read types: %w", err) + } + + return rawTypes, rawStrings, nil +} + +type variable struct { + section string + name string +} + +func fixupDatasec(rawTypes []rawType, rawStrings *stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error { + for i, rawType := range rawTypes { + if rawType.Kind() != kindDatasec { + continue + } + + name, err := rawStrings.Lookup(rawType.NameOff) + if err != nil { + return err + } + + if name == ".kconfig" || name == ".ksyms" { + return fmt.Errorf("reference to %s: %w", name, ErrNotSupported) + } + + if rawTypes[i].SizeType != 0 { + continue + } + + size, ok := sectionSizes[name] + if !ok { + return fmt.Errorf("data section %s: missing size", name) + } + + rawTypes[i].SizeType = size + + secinfos := rawType.data.([]btfVarSecinfo) + for j, secInfo := range secinfos { + id := int(secInfo.Type - 1) + if id >= len(rawTypes) { + return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j) + } + + varName, err := rawStrings.Lookup(rawTypes[id].NameOff) + if err != nil { + return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err) + } + + offset, ok := variableOffsets[variable{name, varName}] + if !ok { + return fmt.Errorf("data section %s: missing offset for variable %s", name, varName) + } + + secinfos[j].Offset = offset + } + } + + return nil +} + +// Copy creates a copy of Spec. +func (s *Spec) Copy() *Spec { + types := copyTypes(s.types, nil) + + typeIDOffset := TypeID(0) + if len(s.types) != 0 { + typeIDOffset = s.typeIDs[s.types[0]] + } + typeIDs, typesByName := indexTypes(types, typeIDOffset) + + // NB: Other parts of spec are not copied since they are immutable. + return &Spec{ + s.rawTypes, + s.strings, + types, + typeIDs, + typesByName, + s.byteOrder, + } +} + +type marshalOpts struct { + ByteOrder binary.ByteOrder + StripFuncLinkage bool +} + +func (s *Spec) marshal(opts marshalOpts) ([]byte, error) { + var ( + buf bytes.Buffer + header = new(btfHeader) + headerLen = binary.Size(header) + ) + + // Reserve space for the header. We have to write it last since + // we don't know the size of the type section yet. + _, _ = buf.Write(make([]byte, headerLen)) + + // Write type section, just after the header. + for _, raw := range s.rawTypes { + switch { + case opts.StripFuncLinkage && raw.Kind() == kindFunc: + raw.SetLinkage(StaticFunc) + } + + if err := raw.Marshal(&buf, opts.ByteOrder); err != nil { + return nil, fmt.Errorf("can't marshal BTF: %w", err) + } + } + + typeLen := uint32(buf.Len() - headerLen) + + // Write string section after type section. + stringsLen := s.strings.Length() + buf.Grow(stringsLen) + if err := s.strings.Marshal(&buf); err != nil { + return nil, err + } + + // Fill out the header, and write it out. 
+ header = &btfHeader{ + Magic: btfMagic, + Version: 1, + Flags: 0, + HdrLen: uint32(headerLen), + TypeOff: 0, + TypeLen: typeLen, + StringOff: typeLen, + StringLen: uint32(stringsLen), + } + + raw := buf.Bytes() + err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header) + if err != nil { + return nil, fmt.Errorf("can't write header: %v", err) + } + + return raw, nil +} + +type sliceWriter []byte + +func (sw sliceWriter) Write(p []byte) (int, error) { + if len(p) != len(sw) { + return 0, errors.New("size doesn't match") + } + + return copy(sw, p), nil +} + +// TypeByID returns the BTF Type with the given type ID. +// +// Returns an error wrapping ErrNotFound if a Type with the given ID +// does not exist in the Spec. +func (s *Spec) TypeByID(id TypeID) (Type, error) { + return s.types.ByID(id) +} + +// TypeID returns the ID for a given Type. +// +// Returns an error wrapping ErrNoFound if the type isn't part of the Spec. +func (s *Spec) TypeID(typ Type) (TypeID, error) { + if _, ok := typ.(*Void); ok { + // Equality is weird for void, since it is a zero sized type. + return 0, nil + } + + id, ok := s.typeIDs[typ] + if !ok { + return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound) + } + + return id, nil +} + +// AnyTypesByName returns a list of BTF Types with the given name. +// +// If the BTF blob describes multiple compilation units like vmlinux, multiple +// Types with the same name and kind can exist, but might not describe the same +// data structure. +// +// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec. +func (s *Spec) AnyTypesByName(name string) ([]Type, error) { + types := s.namedTypes[newEssentialName(name)] + if len(types) == 0 { + return nil, fmt.Errorf("type name %s: %w", name, ErrNotFound) + } + + // Return a copy to prevent changes to namedTypes. + result := make([]Type, 0, len(types)) + for _, t := range types { + // Match against the full name, not just the essential one + // in case the type being looked up is a struct flavor. + if t.TypeName() == name { + result = append(result, t) + } + } + return result, nil +} + +// AnyTypeByName returns a Type with the given name. +// +// Returns an error if multiple types of that name exist. +func (s *Spec) AnyTypeByName(name string) (Type, error) { + types, err := s.AnyTypesByName(name) + if err != nil { + return nil, err + } + + if len(types) > 1 { + return nil, fmt.Errorf("found multiple types: %v", types) + } + + return types[0], nil +} + +// TypeByName searches for a Type with a specific name. Since multiple +// Types with the same name can exist, the parameter typ is taken to +// narrow down the search in case of a clash. +// +// typ must be a non-nil pointer to an implementation of a Type. +// On success, the address of the found Type will be copied to typ. +// +// Returns an error wrapping ErrNotFound if no matching +// Type exists in the Spec. If multiple candidates are found, +// an error is returned. 
+func (s *Spec) TypeByName(name string, typ interface{}) error { + typValue := reflect.ValueOf(typ) + if typValue.Kind() != reflect.Ptr { + return fmt.Errorf("%T is not a pointer", typ) + } + + typPtr := typValue.Elem() + if !typPtr.CanSet() { + return fmt.Errorf("%T cannot be set", typ) + } + + wanted := typPtr.Type() + if !wanted.AssignableTo(reflect.TypeOf((*Type)(nil)).Elem()) { + return fmt.Errorf("%T does not satisfy Type interface", typ) + } + + types, err := s.AnyTypesByName(name) + if err != nil { + return err + } + + var candidate Type + for _, typ := range types { + if reflect.TypeOf(typ) != wanted { + continue + } + + if candidate != nil { + return fmt.Errorf("type %s: multiple candidates for %T", name, typ) + } + + candidate = typ + } + + if candidate == nil { + return fmt.Errorf("type %s: %w", name, ErrNotFound) + } + + typPtr.Set(reflect.ValueOf(candidate)) + + return nil +} + +// LoadSplitSpecFromReader loads split BTF from a reader. +// +// Types from base are used to resolve references in the split BTF. +// The returned Spec only contains types from the split BTF, not from the base. +func LoadSplitSpecFromReader(r io.ReaderAt, base *Spec) (*Spec, error) { + return loadRawSpec(r, internal.NativeEndian, base.types, base.strings) +} + +// TypesIterator iterates over types of a given spec. +type TypesIterator struct { + spec *Spec + index int + // The last visited type in the spec. + Type Type +} + +// Iterate returns the types iterator. +func (s *Spec) Iterate() *TypesIterator { + return &TypesIterator{spec: s, index: 0} +} + +// Next returns true as long as there are any remaining types. +func (iter *TypesIterator) Next() bool { + if len(iter.spec.types) <= iter.index { + return false + } + + iter.Type = iter.spec.types[iter.index] + iter.index++ + return true +} + +// Handle is a reference to BTF loaded into the kernel. +type Handle struct { + fd *sys.FD + + // Size of the raw BTF in bytes. + size uint32 +} + +// NewHandle loads BTF into the kernel. +// +// Returns ErrNotSupported if BTF is not supported. +func NewHandle(spec *Spec) (*Handle, error) { + if err := haveBTF(); err != nil { + return nil, err + } + + if spec.byteOrder != internal.NativeEndian { + return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian) + } + + btf, err := spec.marshal(marshalOpts{ + ByteOrder: internal.NativeEndian, + StripFuncLinkage: haveFuncLinkage() != nil, + }) + if err != nil { + return nil, fmt.Errorf("can't marshal BTF: %w", err) + } + + if uint64(len(btf)) > math.MaxUint32 { + return nil, errors.New("BTF exceeds the maximum size") + } + + attr := &sys.BtfLoadAttr{ + Btf: sys.NewSlicePointer(btf), + BtfSize: uint32(len(btf)), + } + + fd, err := sys.BtfLoad(attr) + if err != nil { + logBuf := make([]byte, 64*1024) + attr.BtfLogBuf = sys.NewSlicePointer(logBuf) + attr.BtfLogSize = uint32(len(logBuf)) + attr.BtfLogLevel = 1 + // NB: The syscall will never return ENOSPC as of 5.18-rc4. + _, _ = sys.BtfLoad(attr) + return nil, internal.ErrorWithLog(err, logBuf) + } + + return &Handle{fd, attr.BtfSize}, nil +} + +// NewHandleFromID returns the BTF handle for a given id. +// +// Prefer calling [ebpf.Program.Handle] or [ebpf.Map.Handle] if possible. +// +// Returns ErrNotExist, if there is no BTF with the given id. +// +// Requires CAP_SYS_ADMIN. 
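TypeByName, TypeID and Iterate are the main entry points for consuming a Spec. The sketch below shows the intended calling pattern; it is not part of the patch, the helper name is made up, and the Spec is assumed to come from elsewhere (for example LoadSplitSpecFromReader or a Handle's Spec method).

package btfexample

import (
	"fmt"

	"github.com/cilium/ebpf/btf"
)

// inspectSpec is a hypothetical helper, not part of the vendored code.
func inspectSpec(spec *btf.Spec) error {
	// TypeByName wants a pointer to a concrete Type implementation; the
	// pointer's element type (*btf.Struct here) narrows the search.
	var task *btf.Struct
	if err := spec.TypeByName("task_struct", &task); err != nil {
		return err
	}

	id, err := spec.TypeID(task)
	if err != nil {
		return err
	}
	fmt.Printf("%s has type ID %d\n", task.TypeName(), id)

	// Iterate visits every type contained in the Spec.
	total := 0
	iter := spec.Iterate()
	for iter.Next() {
		total++
	}
	fmt.Println("types in spec:", total)
	return nil
}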
+func NewHandleFromID(id ID) (*Handle, error) { + fd, err := sys.BtfGetFdById(&sys.BtfGetFdByIdAttr{ + Id: uint32(id), + }) + if err != nil { + return nil, fmt.Errorf("get FD for ID %d: %w", id, err) + } + + info, err := newHandleInfoFromFD(fd) + if err != nil { + _ = fd.Close() + return nil, err + } + + return &Handle{fd, info.size}, nil +} + +// Spec parses the kernel BTF into Go types. +// +// base is used to decode split BTF and may be nil. +func (h *Handle) Spec(base *Spec) (*Spec, error) { + var btfInfo sys.BtfInfo + btfBuffer := make([]byte, h.size) + btfInfo.Btf, btfInfo.BtfSize = sys.NewSlicePointerLen(btfBuffer) + + if err := sys.ObjInfo(h.fd, &btfInfo); err != nil { + return nil, err + } + + var baseTypes types + var baseStrings *stringTable + if base != nil { + baseTypes = base.types + baseStrings = base.strings + } + + return loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, baseTypes, baseStrings) +} + +// Close destroys the handle. +// +// Subsequent calls to FD will return an invalid value. +func (h *Handle) Close() error { + if h == nil { + return nil + } + + return h.fd.Close() +} + +// FD returns the file descriptor for the handle. +func (h *Handle) FD() int { + return h.fd.Int() +} + +// Info returns metadata about the handle. +func (h *Handle) Info() (*HandleInfo, error) { + return newHandleInfoFromFD(h.fd) +} + +func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte { + const minHeaderLength = 24 + + typesLen := uint32(binary.Size(types)) + header := btfHeader{ + Magic: btfMagic, + Version: 1, + HdrLen: minHeaderLength, + TypeOff: 0, + TypeLen: typesLen, + StringOff: typesLen, + StringLen: uint32(len(strings)), + } + + buf := new(bytes.Buffer) + _ = binary.Write(buf, bo, &header) + _ = binary.Write(buf, bo, types) + buf.Write(strings) + + return buf.Bytes() +} + +var haveBTF = internal.FeatureTest("BTF", "5.1", func() error { + var ( + types struct { + Integer btfType + Var btfType + btfVar struct{ Linkage uint32 } + } + strings = []byte{0, 'a', 0} + ) + + // We use a BTF_KIND_VAR here, to make sure that + // the kernel understands BTF at least as well as we + // do. BTF_KIND_VAR was introduced ~5.1. + types.Integer.SetKind(kindPointer) + types.Var.NameOff = 1 + types.Var.SetKind(kindVar) + types.Var.SizeType = 1 + + btf := marshalBTF(&types, strings, internal.NativeEndian) + + fd, err := sys.BtfLoad(&sys.BtfLoadAttr{ + Btf: sys.NewSlicePointer(btf), + BtfSize: uint32(len(btf)), + }) + if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { + // Treat both EINVAL and EPERM as not supported: loading the program + // might still succeed without BTF. 
+ return internal.ErrNotSupported + } + if err != nil { + return err + } + + fd.Close() + return nil +}) + +var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() error { + if err := haveBTF(); err != nil { + return err + } + + var ( + types struct { + FuncProto btfType + Func btfType + } + strings = []byte{0, 'a', 0} + ) + + types.FuncProto.SetKind(kindFuncProto) + types.Func.SetKind(kindFunc) + types.Func.SizeType = 1 // aka FuncProto + types.Func.NameOff = 1 + types.Func.SetLinkage(GlobalFunc) + + btf := marshalBTF(&types, strings, internal.NativeEndian) + + fd, err := sys.BtfLoad(&sys.BtfLoadAttr{ + Btf: sys.NewSlicePointer(btf), + BtfSize: uint32(len(btf)), + }) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + if err != nil { + return err + } + + fd.Close() + return nil +}) diff --git a/agent/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go b/agent/vendor/github.com/cilium/ebpf/btf/btf_types.go similarity index 71% rename from agent/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go rename to agent/vendor/github.com/cilium/ebpf/btf/btf_types.go index a5ef9451201..4810180494e 100644 --- a/agent/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go +++ b/agent/vendor/github.com/cilium/ebpf/btf/btf_types.go @@ -31,6 +31,8 @@ const ( // Added ~5.1 kindVar kindDatasec + // Added ~5.13 + kindFloat ) // FuncLinkage describes BTF function linkage metadata. @@ -54,7 +56,7 @@ const ( const ( btfTypeKindShift = 24 - btfTypeKindLen = 4 + btfTypeKindLen = 5 btfTypeVlenShift = 0 btfTypeVlenMask = 16 btfTypeKindFlagShift = 31 @@ -67,8 +69,8 @@ type btfType struct { /* "info" bits arrangement * bits 0-15: vlen (e.g. # of struct's members), linkage * bits 16-23: unused - * bits 24-27: kind (e.g. int, ptr, array...etc) - * bits 28-30: unused + * bits 24-28: kind (e.g. int, ptr, array...etc) + * bits 29-30: unused * bit 31: kind_flag, currently used by * struct, union and fwd */ @@ -117,6 +119,8 @@ func (k btfKind) String() string { return "Variable" case kindDatasec: return "Section" + case kindFloat: + return "Float" default: return fmt.Sprintf("Unknown (%d)", k) } @@ -126,13 +130,22 @@ func mask(len uint32) uint32 { return (1 << len) - 1 } +func readBits(value, len, shift uint32) uint32 { + return (value >> shift) & mask(len) +} + +func writeBits(value, len, shift, new uint32) uint32 { + value &^= mask(len) << shift + value |= (new & mask(len)) << shift + return value +} + func (bt *btfType) info(len, shift uint32) uint32 { - return (bt.Info >> shift) & mask(len) + return readBits(bt.Info, len, shift) } func (bt *btfType) setInfo(value, len, shift uint32) { - bt.Info &^= mask(len) << shift - bt.Info |= (value & mask(len)) << shift + bt.Info = writeBits(bt.Info, len, shift, value) } func (bt *btfType) Kind() btfKind { @@ -173,6 +186,10 @@ func (bt *btfType) Size() uint32 { return bt.SizeType } +func (bt *btfType) SetSize(size uint32) { + bt.SizeType = size +} + type rawType struct { btfType data interface{} @@ -190,6 +207,50 @@ func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error { return binary.Write(w, bo, rt.data) } +// btfInt encodes additional data for integers. +// +// ? ? ? ? e e e e o o o o o o o o ? ? ? ? ? ? ? ? b b b b b b b b +// ? = undefined +// e = encoding +// o = offset (bitfields?) 
+// b = bits (bitfields) +type btfInt struct { + Raw uint32 +} + +const ( + btfIntEncodingLen = 4 + btfIntEncodingShift = 24 + btfIntOffsetLen = 8 + btfIntOffsetShift = 16 + btfIntBitsLen = 8 + btfIntBitsShift = 0 +) + +func (bi btfInt) Encoding() IntEncoding { + return IntEncoding(readBits(bi.Raw, btfIntEncodingLen, btfIntEncodingShift)) +} + +func (bi *btfInt) SetEncoding(e IntEncoding) { + bi.Raw = writeBits(uint32(bi.Raw), btfIntEncodingLen, btfIntEncodingShift, uint32(e)) +} + +func (bi btfInt) Offset() Bits { + return Bits(readBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift)) +} + +func (bi *btfInt) SetOffset(offset uint32) { + bi.Raw = writeBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift, offset) +} + +func (bi btfInt) Bits() Bits { + return Bits(readBits(bi.Raw, btfIntBitsLen, btfIntBitsShift)) +} + +func (bi *btfInt) SetBits(bits byte) { + bi.Raw = writeBits(bi.Raw, btfIntBitsLen, btfIntBitsShift, uint32(bits)) +} + type btfArray struct { Type TypeID IndexType TypeID @@ -222,11 +283,14 @@ type btfParam struct { Type TypeID } -func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) { - var ( - header btfType - types []rawType - ) +func readTypes(r io.Reader, bo binary.ByteOrder, typeLen uint32) ([]rawType, error) { + var header btfType + // because of the interleaving between types and struct members it is difficult to + // precompute the numbers of raw types this will parse + // this "guess" is a good first estimation + sizeOfbtfType := uintptr(binary.Size(btfType{})) + tyMaxCount := uintptr(typeLen) / sizeOfbtfType / 2 + types := make([]rawType, 0, tyMaxCount) for id := TypeID(1); ; id++ { if err := binary.Read(r, bo, &header); err == io.EOF { @@ -238,7 +302,7 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) { var data interface{} switch header.Kind() { case kindInt: - data = new(uint32) + data = new(btfInt) case kindPointer: case kindArray: data = new(btfArray) @@ -260,6 +324,7 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) { data = new(btfVariable) case kindDatasec: data = make([]btfVarSecinfo, header.Vlen()) + case kindFloat: default: return nil, fmt.Errorf("type id %v: unknown kind: %v", id, header.Kind()) } @@ -276,7 +341,3 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) { types = append(types, rawType{header, data}) } } - -func intEncoding(raw uint32) (IntEncoding, uint32, byte) { - return IntEncoding((raw & 0x0f000000) >> 24), (raw & 0x00ff0000) >> 16, byte(raw & 0x000000ff) -} diff --git a/agent/vendor/github.com/cilium/ebpf/internal/btf/btf_types_string.go b/agent/vendor/github.com/cilium/ebpf/btf/btf_types_string.go similarity index 100% rename from agent/vendor/github.com/cilium/ebpf/internal/btf/btf_types_string.go rename to agent/vendor/github.com/cilium/ebpf/btf/btf_types_string.go diff --git a/agent/vendor/github.com/cilium/ebpf/internal/btf/core.go b/agent/vendor/github.com/cilium/ebpf/btf/core.go similarity index 64% rename from agent/vendor/github.com/cilium/ebpf/internal/btf/core.go rename to agent/vendor/github.com/cilium/ebpf/btf/core.go index 7c888f602d0..c4875480935 100644 --- a/agent/vendor/github.com/cilium/ebpf/internal/btf/core.go +++ b/agent/vendor/github.com/cilium/ebpf/btf/core.go @@ -1,11 +1,11 @@ package btf import ( + "encoding/binary" "errors" "fmt" "math" "reflect" - "sort" "strconv" "strings" @@ -17,50 +17,58 @@ import ( // COREFixup is the result of computing a CO-RE relocation for a target. 
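Looking back at the btfInt accessors added in the btf_types.go hunk above: Raw packs the 4-bit encoding at bit 24, the 8-bit offset at bit 16 and the 8-bit width at bit 0, which is the layout readBits and writeBits operate on. A small stand-alone sketch of that packing with assumed values (an 8-bit signed integer, taking the signed flag to be bit 0 of the encoding nibble, per the kernel's BTF_INT_SIGNED):

package btfexample

// Constants mirroring btfIntEncodingShift, btfIntOffsetShift and
// btfIntBitsShift from the hunk above.
const (
	intEncodingShift = 24
	intOffsetShift   = 16
	intBitsShift     = 0
)

// packBTFInt is a hypothetical stand-in for btfInt's SetEncoding, SetOffset
// and SetBits setters.
func packBTFInt(encoding, offset, bits uint32) uint32 {
	return encoding<<intEncodingShift | offset<<intOffsetShift | bits<<intBitsShift
}

// packBTFInt(1, 0, 8) yields 0x01000008: encoding 1 (signed), bit offset 0,
// width 8 bits, matching what Encoding(), Offset() and Bits() read back.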
type COREFixup struct { - Kind COREKind - Local uint32 - Target uint32 - Poison bool + kind coreKind + local uint32 + target uint32 + // True if there is no valid fixup. The instruction is replaced with an + // invalid dummy. + poison bool + // True if the validation of the local value should be skipped. Used by + // some kinds of bitfield relocations. + skipLocalValidation bool } -func (f COREFixup) equal(other COREFixup) bool { - return f.Local == other.Local && f.Target == other.Target +func (f *COREFixup) equal(other COREFixup) bool { + return f.local == other.local && f.target == other.target } -func (f COREFixup) String() string { - if f.Poison { - return fmt.Sprintf("%s=poison", f.Kind) +func (f *COREFixup) String() string { + if f.poison { + return fmt.Sprintf("%s=poison", f.kind) } - return fmt.Sprintf("%s=%d->%d", f.Kind, f.Local, f.Target) + return fmt.Sprintf("%s=%d->%d", f.kind, f.local, f.target) } -func (f COREFixup) apply(ins *asm.Instruction) error { - if f.Poison { - return errors.New("can't poison individual instruction") +func (f *COREFixup) Apply(ins *asm.Instruction) error { + if f.poison { + const badRelo = 0xbad2310 + + *ins = asm.BuiltinFunc(badRelo).Call() + return nil } switch class := ins.OpCode.Class(); class { case asm.LdXClass, asm.StClass, asm.StXClass: - if want := int16(f.Local); want != ins.Offset { - return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, want) + if want := int16(f.local); !f.skipLocalValidation && want != ins.Offset { + return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, f.local) } - if f.Target > math.MaxInt16 { - return fmt.Errorf("offset %d exceeds MaxInt16", f.Target) + if f.target > math.MaxInt16 { + return fmt.Errorf("offset %d exceeds MaxInt16", f.target) } - ins.Offset = int16(f.Target) + ins.Offset = int16(f.target) case asm.LdClass: if !ins.IsConstantLoad(asm.DWord) { return fmt.Errorf("not a dword-sized immediate load") } - if want := int64(f.Local); want != ins.Constant { - return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want) + if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant { + return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v)", ins.Constant, want, f) } - ins.Constant = int64(f.Target) + ins.Constant = int64(f.target) case asm.ALUClass: if ins.OpCode.ALUOp() == asm.Swap { @@ -74,15 +82,15 @@ func (f COREFixup) apply(ins *asm.Instruction) error { return fmt.Errorf("invalid source %s", src) } - if want := int64(f.Local); want != ins.Constant { - return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want) + if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant { + return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v, kind: %v, ins: %v)", ins.Constant, want, f, f.kind, ins) } - if f.Target > math.MaxInt32 { - return fmt.Errorf("immediate %d exceeds MaxInt32", f.Target) + if f.target > math.MaxInt32 { + return fmt.Errorf("immediate %d exceeds MaxInt32", f.target) } - ins.Constant = int64(f.Target) + ins.Constant = int64(f.target) default: return fmt.Errorf("invalid class %s", class) @@ -92,57 +100,14 @@ func (f COREFixup) apply(ins *asm.Instruction) error { } func (f COREFixup) isNonExistant() bool { - return f.Kind.checksForExistence() && f.Target == 0 -} - -type COREFixups map[uint64]COREFixup - -// Apply a set of CO-RE relocations to a BPF program. 
-func (fs COREFixups) Apply(insns asm.Instructions) (asm.Instructions, error) { - if len(fs) == 0 { - cpy := make(asm.Instructions, len(insns)) - copy(cpy, insns) - return insns, nil - } - - cpy := make(asm.Instructions, 0, len(insns)) - iter := insns.Iterate() - for iter.Next() { - fixup, ok := fs[iter.Offset.Bytes()] - if !ok { - cpy = append(cpy, *iter.Ins) - continue - } - - ins := *iter.Ins - if fixup.Poison { - const badRelo = asm.BuiltinFunc(0xbad2310) - - cpy = append(cpy, badRelo.Call()) - if ins.OpCode.IsDWordLoad() { - // 64 bit constant loads occupy two raw bpf instructions, so - // we need to add another instruction as padding. - cpy = append(cpy, badRelo.Call()) - } - - continue - } - - if err := fixup.apply(&ins); err != nil { - return nil, fmt.Errorf("instruction %d, offset %d: %s: %w", iter.Index, iter.Offset.Bytes(), fixup.Kind, err) - } - - cpy = append(cpy, ins) - } - - return cpy, nil + return f.kind.checksForExistence() && f.target == 0 } -// COREKind is the type of CO-RE relocation -type COREKind uint32 +// coreKind is the type of CO-RE relocation as specified in BPF source code. +type coreKind uint32 const ( - reloFieldByteOffset COREKind = iota /* field byte offset */ + reloFieldByteOffset coreKind = iota /* field byte offset */ reloFieldByteSize /* field size in bytes */ reloFieldExists /* field existence in target kernel */ reloFieldSigned /* field signedness (0 - unsigned, 1 - signed) */ @@ -156,7 +121,11 @@ const ( reloEnumvalValue /* enum value integer value */ ) -func (k COREKind) String() string { +func (k coreKind) checksForExistence() bool { + return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists +} + +func (k coreKind) String() string { switch k { case reloFieldByteOffset: return "byte_off" @@ -187,19 +156,28 @@ func (k COREKind) String() string { } } -func (k COREKind) checksForExistence() bool { - return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists -} - -func coreRelocate(local, target *Spec, relos coreRelos) (COREFixups, error) { +// CORERelocate calculates the difference in types between local and target. +// +// Returns a list of fixups which can be applied to instructions to make them +// match the target type(s). +// +// Fixups are returned in the order of relos, e.g. fixup[i] is the solution +// for relos[i]. +func CORERelocate(local, target *Spec, relos []*CORERelocation) ([]COREFixup, error) { if local.byteOrder != target.byteOrder { return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder) } - var ids []TypeID - relosByID := make(map[TypeID]coreRelos) - result := make(COREFixups, len(relos)) - for _, relo := range relos { + type reloGroup struct { + relos []*CORERelocation + // Position of each relocation in relos. + indices []int + } + + // Split relocations into per Type lists. + relosByType := make(map[Type]*reloGroup) + result := make([]COREFixup, len(relos)) + for i, relo := range relos { if relo.kind == reloTypeIDLocal { // Filtering out reloTypeIDLocal here makes our lives a lot easier // down the line, since it doesn't have a target at all. 
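With the bulk COREFixups.Apply method gone, CORERelocate now hands back a plain []COREFixup and each fixup is patched into its instruction via COREFixup.Apply. A rough sketch of that flow; the helper and the insnIdx mapping are illustrative, and the caller is assumed to already know which instruction carried each relocation.

package btfexample

import (
	"github.com/cilium/ebpf/asm"
	"github.com/cilium/ebpf/btf"
)

// applyFixups is a hypothetical helper: relos[i] is assumed to describe the
// instruction at insns[insnIdx[i]].
func applyFixups(local, target *btf.Spec, insns asm.Instructions, relos []*btf.CORERelocation, insnIdx []int) error {
	fixups, err := btf.CORERelocate(local, target, relos)
	if err != nil {
		return err
	}

	// fixups[i] is the solution for relos[i], per the CORERelocate contract.
	for i, fixup := range fixups {
		if err := fixup.Apply(&insns[insnIdx[i]]); err != nil {
			return err
		}
	}
	return nil
}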
@@ -207,47 +185,42 @@ func coreRelocate(local, target *Spec, relos coreRelos) (COREFixups, error) { return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor) } - result[uint64(relo.insnOff)] = COREFixup{ - relo.kind, - uint32(relo.typeID), - uint32(relo.typeID), - false, + id, err := local.TypeID(relo.typ) + if err != nil { + return nil, fmt.Errorf("%s: %w", relo.kind, err) + } + + result[i] = COREFixup{ + kind: relo.kind, + local: uint32(id), + target: uint32(id), } continue } - relos, ok := relosByID[relo.typeID] + group, ok := relosByType[relo.typ] if !ok { - ids = append(ids, relo.typeID) + group = &reloGroup{} + relosByType[relo.typ] = group } - relosByID[relo.typeID] = append(relos, relo) + group.relos = append(group.relos, relo) + group.indices = append(group.indices, i) } - // Ensure we work on relocations in a deterministic order. - sort.Slice(ids, func(i, j int) bool { - return ids[i] < ids[j] - }) - - for _, id := range ids { - if int(id) >= len(local.types) { - return nil, fmt.Errorf("invalid type id %d", id) - } - - localType := local.types[id] - named, ok := localType.(namedType) - if !ok || named.name() == "" { + for localType, group := range relosByType { + localTypeName := localType.TypeName() + if localTypeName == "" { return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported) } - relos := relosByID[id] - targets := target.namedTypes[named.essentialName()] - fixups, err := coreCalculateFixups(localType, targets, relos) + targets := target.namedTypes[newEssentialName(localTypeName)] + fixups, err := coreCalculateFixups(local, target, localType, targets, group.relos) if err != nil { return nil, fmt.Errorf("relocate %s: %w", localType, err) } - for i, relo := range relos { - result[uint64(relo.insnOff)] = fixups[i] + for j, index := range group.indices { + result[index] = fixups[j] } } @@ -262,30 +235,30 @@ var errImpossibleRelocation = errors.New("impossible relocation") // // The best target is determined by scoring: the less poisoning we have to do // the better the target is. -func coreCalculateFixups(local Type, targets []namedType, relos coreRelos) ([]COREFixup, error) { - localID := local.ID() - local, err := copyType(local, skipQualifierAndTypedef) +func coreCalculateFixups(localSpec, targetSpec *Spec, local Type, targets []Type, relos []*CORERelocation) ([]COREFixup, error) { + localID, err := localSpec.TypeID(local) if err != nil { - return nil, err + return nil, fmt.Errorf("local type ID: %w", err) } + local = Copy(local, UnderlyingType) bestScore := len(relos) var bestFixups []COREFixup for i := range targets { - targetID := targets[i].ID() - target, err := copyType(targets[i], skipQualifierAndTypedef) + targetID, err := targetSpec.TypeID(targets[i]) if err != nil { - return nil, err + return nil, fmt.Errorf("target type ID: %w", err) } + target := Copy(targets[i], UnderlyingType) score := 0 // lower is better fixups := make([]COREFixup, 0, len(relos)) for _, relo := range relos { - fixup, err := coreCalculateFixup(local, localID, target, targetID, relo) + fixup, err := coreCalculateFixup(localSpec.byteOrder, local, localID, target, targetID, relo) if err != nil { return nil, fmt.Errorf("target %s: %w", target, err) } - if fixup.Poison || fixup.isNonExistant() { + if fixup.poison || fixup.isNonExistant() { score++ } fixups = append(fixups, fixup) @@ -307,17 +280,23 @@ func coreCalculateFixups(local Type, targets []namedType, relos coreRelos) ([]CO // the fixups agree with each other. 
for i, fixup := range bestFixups { if !fixup.equal(fixups[i]) { - return nil, fmt.Errorf("%s: multiple types match: %w", fixup.Kind, errAmbiguousRelocation) + return nil, fmt.Errorf("%s: multiple types match: %w", fixup.kind, errAmbiguousRelocation) } } } if bestFixups == nil { // Nothing at all matched, probably because there are no suitable - // targets at all. Poison everything! + // targets at all. + // + // Poison everything except checksForExistence. bestFixups = make([]COREFixup, len(relos)) for i, relo := range relos { - bestFixups[i] = COREFixup{Kind: relo.kind, Poison: true} + if relo.kind.checksForExistence() { + bestFixups[i] = COREFixup{kind: relo.kind, local: 1, target: 0} + } else { + bestFixups[i] = COREFixup{kind: relo.kind, poison: true} + } } } @@ -326,15 +305,18 @@ func coreCalculateFixups(local Type, targets []namedType, relos coreRelos) ([]CO // coreCalculateFixup calculates the fixup for a single local type, target type // and relocation. -func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID, relo coreRelo) (COREFixup, error) { +func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID, target Type, targetID TypeID, relo *CORERelocation) (COREFixup, error) { fixup := func(local, target uint32) (COREFixup, error) { - return COREFixup{relo.kind, local, target, false}, nil + return COREFixup{kind: relo.kind, local: local, target: target}, nil + } + fixupWithoutValidation := func(local, target uint32) (COREFixup, error) { + return COREFixup{kind: relo.kind, local: local, target: target, skipLocalValidation: true}, nil } poison := func() (COREFixup, error) { if relo.kind.checksForExistence() { return fixup(1, 0) } - return COREFixup{relo.kind, 0, 0, true}, nil + return COREFixup{kind: relo.kind, poison: true}, nil } zero := COREFixup{} @@ -390,7 +372,20 @@ func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID return fixup(uint32(localValue.Value), uint32(targetValue.Value)) } - case reloFieldByteOffset, reloFieldByteSize, reloFieldExists: + case reloFieldSigned: + switch local.(type) { + case *Enum: + return fixup(1, 1) + case *Int: + return fixup( + uint32(local.(*Int).Encoding&Signed), + uint32(target.(*Int).Encoding&Signed), + ) + default: + return fixupWithoutValidation(0, 0) + } + + case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64: if _, ok := target.(*Fwd); ok { // We can't relocate fields using a forward declaration, so // skip it. 
If a non-forward declaration is present in the BTF @@ -406,12 +401,17 @@ func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID return zero, fmt.Errorf("target %s: %w", target, err) } + maybeSkipValidation := func(f COREFixup, err error) (COREFixup, error) { + f.skipLocalValidation = localField.bitfieldSize > 0 + return f, err + } + switch relo.kind { case reloFieldExists: return fixup(1, 1) case reloFieldByteOffset: - return fixup(localField.offset/8, targetField.offset/8) + return maybeSkipValidation(fixup(localField.offset, targetField.offset)) case reloFieldByteSize: localSize, err := Sizeof(localField.Type) @@ -423,9 +423,34 @@ func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID if err != nil { return zero, err } + return maybeSkipValidation(fixup(uint32(localSize), uint32(targetSize))) + + case reloFieldLShiftU64: + var target uint32 + if byteOrder == binary.LittleEndian { + targetSize, err := targetField.sizeBits() + if err != nil { + return zero, err + } - return fixup(uint32(localSize), uint32(targetSize)) + target = uint32(64 - targetField.bitfieldOffset - targetSize) + } else { + loadWidth, err := Sizeof(targetField.Type) + if err != nil { + return zero, err + } + + target = uint32(64 - Bits(loadWidth*8) + targetField.bitfieldOffset) + } + return fixupWithoutValidation(0, target) + + case reloFieldRShiftU64: + targetSize, err := targetField.sizeBits() + if err != nil { + return zero, err + } + return fixupWithoutValidation(0, uint32(64-targetSize)) } } @@ -462,13 +487,13 @@ func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID */ type coreAccessor []int -func parseCoreAccessor(accessor string) (coreAccessor, error) { +func parseCOREAccessor(accessor string) (coreAccessor, error) { if accessor == "" { return nil, fmt.Errorf("empty accessor") } - var result coreAccessor parts := strings.Split(accessor, ":") + result := make(coreAccessor, 0, len(parts)) for _, part := range parts { // 31 bits to avoid overflowing int on 32 bit platforms. index, err := strconv.ParseUint(part, 10, 31) @@ -508,18 +533,73 @@ func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) { return &e.Values[i], nil } +// coreField represents the position of a "child" of a composite type from the +// start of that type. +// +// /- start of composite +// | offset * 8 | bitfieldOffset | bitfieldSize | ... | +// \- start of field end of field -/ type coreField struct { - Type Type + Type Type + + // The position of the field from the start of the composite type in bytes. offset uint32 + + // The offset of the bitfield in bits from the start of the field. + bitfieldOffset Bits + + // The size of the bitfield in bits. + // + // Zero if the field is not a bitfield. + bitfieldSize Bits +} + +func (cf *coreField) adjustOffsetToNthElement(n int) error { + size, err := Sizeof(cf.Type) + if err != nil { + return err + } + + cf.offset += uint32(n) * uint32(size) + return nil } -func adjustOffset(base uint32, t Type, n int) (uint32, error) { - size, err := Sizeof(t) +func (cf *coreField) adjustOffsetBits(offset Bits) error { + align, err := alignof(cf.Type) if err != nil { - return 0, err + return err + } + + // We can compute the load offset by: + // 1) converting the bit offset to bytes with a flooring division. + // 2) dividing and multiplying that offset by the alignment, yielding the + // load size aligned offset. 
+ offsetBytes := uint32(offset/8) / uint32(align) * uint32(align) + + // The number of bits remaining is the bit offset less the number of bits + // we can "skip" with the aligned offset. + cf.bitfieldOffset = offset - Bits(offsetBytes*8) + + // We know that cf.offset is aligned at to at least align since we get it + // from the compiler via BTF. Adding an aligned offsetBytes preserves the + // alignment. + cf.offset += offsetBytes + return nil +} + +func (cf *coreField) sizeBits() (Bits, error) { + if cf.bitfieldSize > 0 { + return cf.bitfieldSize, nil } - return base + (uint32(n) * uint32(size) * 8), nil + // Someone is trying to access a non-bitfield via a bit shift relocation. + // This happens when a field changes from a bitfield to a regular field + // between kernel versions. Synthesise the size to make the shifts work. + size, err := Sizeof(cf.Type) + if err != nil { + return 0, nil + } + return Bits(size * 8), nil } // coreFindField descends into the local type using the accessor and tries to @@ -527,32 +607,33 @@ func adjustOffset(base uint32, t Type, n int) (uint32, error) { // // Returns the field and the offset of the field from the start of // target in bits. -func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreField, _ error) { +func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField, coreField, error) { + local := coreField{Type: localT} + target := coreField{Type: targetT} + // The first index is used to offset a pointer of the base type like // when accessing an array. - localOffset, err := adjustOffset(0, local, localAcc[0]) - if err != nil { + if err := local.adjustOffsetToNthElement(localAcc[0]); err != nil { return coreField{}, coreField{}, err } - targetOffset, err := adjustOffset(0, target, localAcc[0]) - if err != nil { + if err := target.adjustOffsetToNthElement(localAcc[0]); err != nil { return coreField{}, coreField{}, err } - if err := coreAreMembersCompatible(local, target); err != nil { + if err := coreAreMembersCompatible(local.Type, target.Type); err != nil { return coreField{}, coreField{}, fmt.Errorf("fields: %w", err) } var localMaybeFlex, targetMaybeFlex bool - for _, acc := range localAcc[1:] { - switch localType := local.(type) { + for i, acc := range localAcc[1:] { + switch localType := local.Type.(type) { case composite: // For composite types acc is used to find the field in the local type, // and then we try to find a field in target with the same name. localMembers := localType.members() if acc >= len(localMembers) { - return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, local) + return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, localType) } localMember := localMembers[acc] @@ -563,13 +644,15 @@ func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreFie } // This is an anonymous struct or union, ignore it. 
- local = localMember.Type - localOffset += localMember.Offset + local = coreField{ + Type: localMember.Type, + offset: local.offset + localMember.Offset.Bytes(), + } localMaybeFlex = false continue } - targetType, ok := target.(composite) + targetType, ok := target.Type.(composite) if !ok { return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation) } @@ -579,20 +662,43 @@ func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreFie return coreField{}, coreField{}, err } - if targetMember.BitfieldSize > 0 { - return coreField{}, coreField{}, fmt.Errorf("field %q is a bitfield: %w", targetMember.Name, ErrNotSupported) + local = coreField{ + Type: localMember.Type, + offset: local.offset, + bitfieldSize: localMember.BitfieldSize, } - - local = localMember.Type localMaybeFlex = acc == len(localMembers)-1 - localOffset += localMember.Offset - target = targetMember.Type + + target = coreField{ + Type: targetMember.Type, + offset: target.offset, + bitfieldSize: targetMember.BitfieldSize, + } targetMaybeFlex = last - targetOffset += targetMember.Offset + + if local.bitfieldSize == 0 && target.bitfieldSize == 0 { + local.offset += localMember.Offset.Bytes() + target.offset += targetMember.Offset.Bytes() + break + } + + // Either of the members is a bitfield. Make sure we're at the + // end of the accessor. + if next := i + 1; next < len(localAcc[1:]) { + return coreField{}, coreField{}, fmt.Errorf("can't descend into bitfield") + } + + if err := local.adjustOffsetBits(localMember.Offset); err != nil { + return coreField{}, coreField{}, err + } + + if err := target.adjustOffsetBits(targetMember.Offset); err != nil { + return coreField{}, coreField{}, err + } case *Array: // For arrays, acc is the index in the target. - targetType, ok := target.(*Array) + targetType, ok := target.Type.(*Array) if !ok { return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation) } @@ -611,17 +717,23 @@ func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreFie return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation) } - local = localType.Type + local = coreField{ + Type: localType.Type, + offset: local.offset, + } localMaybeFlex = false - localOffset, err = adjustOffset(localOffset, local, acc) - if err != nil { + + if err := local.adjustOffsetToNthElement(acc); err != nil { return coreField{}, coreField{}, err } - target = targetType.Type + target = coreField{ + Type: targetType.Type, + offset: target.offset, + } targetMaybeFlex = false - targetOffset, err = adjustOffset(targetOffset, target, acc) - if err != nil { + + if err := target.adjustOffsetToNthElement(acc); err != nil { return coreField{}, coreField{}, err } @@ -629,24 +741,24 @@ func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreFie return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported) } - if err := coreAreMembersCompatible(local, target); err != nil { + if err := coreAreMembersCompatible(local.Type, target.Type); err != nil { return coreField{}, coreField{}, err } } - return coreField{local, localOffset}, coreField{target, targetOffset}, nil + return local, target, nil } // coreFindMember finds a member in a composite type while handling anonymous // structs and unions. 
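The bitfield support above reduces to simple arithmetic: adjustOffsetBits splits the BTF bit offset into an aligned byte offset for the load plus a residual bit offset, and the two shift relocations are derived from that residual and the field width. A worked sketch with assumed numbers (a 5-bit field at bit offset 18 in a 4-byte-aligned word, little-endian):

package btfexample

// Assumed example values, mirroring coreField.adjustOffsetBits and the
// reloFieldLShiftU64 / reloFieldRShiftU64 cases above.
const (
	alignBytes   = 4  // alignment of the enclosing field's type
	bitOffset    = 18 // offset of the bitfield in bits, from BTF
	bitfieldBits = 5  // width of the bitfield in bits
)

func bitfieldShifts() (loadOffset, lshift, rshift uint32) {
	loadOffset = (bitOffset / 8) / alignBytes * alignBytes // 0: byte offset of the aligned load
	residual := bitOffset - loadOffset*8                   // 18: bits left over after the aligned load
	lshift = 64 - residual - bitfieldBits                  // 41 on little-endian
	rshift = 64 - bitfieldBits                             // 59
	return
}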
-func coreFindMember(typ composite, name Name) (Member, bool, error) { +func coreFindMember(typ composite, name string) (Member, bool, error) { if name == "" { return Member{}, false, errors.New("can't search for anonymous member") } type offsetTarget struct { composite - offset uint32 + offset Bits } targets := []offsetTarget{{typ, 0}} @@ -704,9 +816,9 @@ func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localVal return nil, nil, errImpossibleRelocation } - localName := localValue.Name.essentialName() + localName := newEssentialName(localValue.Name) for i, targetValue := range targetEnum.Values { - if targetValue.Name.essentialName() != localName { + if newEssentialName(targetValue.Name) != localName { continue } @@ -759,15 +871,9 @@ func coreAreTypesCompatible(localType Type, targetType Type) error { } switch lv := (localType).(type) { - case *Void, *Struct, *Union, *Enum, *Fwd: + case *Void, *Struct, *Union, *Enum, *Fwd, *Int: // Nothing to do here - case *Int: - tv := targetType.(*Int) - if lv.isBitfield() || tv.isBitfield() { - return fmt.Errorf("bitfield: %w", errImpossibleRelocation) - } - case *Pointer, *Array: depth++ localType.walk(&localTs) @@ -813,6 +919,7 @@ func coreAreTypesCompatible(localType Type, targetType Type) error { * least one of enums should be anonymous; * - for ENUMs, check sizes, names are ignored; * - for INT, size and signedness are ignored; + * - any two FLOATs are always compatible; * - for ARRAY, dimensionality is ignored, element types are checked for * compatibility recursively; * [ NB: coreAreMembersCompatible doesn't recurse, this check is done @@ -830,7 +937,7 @@ func coreAreMembersCompatible(localType Type, targetType Type) error { return nil } - if essentialName(a) == essentialName(b) { + if newEssentialName(a) == newEssentialName(b) { return nil } @@ -848,40 +955,18 @@ func coreAreMembersCompatible(localType Type, targetType Type) error { } switch lv := localType.(type) { - case *Array, *Pointer: + case *Array, *Pointer, *Float, *Int: return nil case *Enum: tv := targetType.(*Enum) - return doNamesMatch(lv.name(), tv.name()) + return doNamesMatch(lv.Name, tv.Name) case *Fwd: tv := targetType.(*Fwd) - return doNamesMatch(lv.name(), tv.name()) - - case *Int: - tv := targetType.(*Int) - if lv.isBitfield() || tv.isBitfield() { - return fmt.Errorf("bitfield: %w", errImpossibleRelocation) - } - return nil + return doNamesMatch(lv.Name, tv.Name) default: return fmt.Errorf("type %s: %w", localType, ErrNotSupported) } } - -func skipQualifierAndTypedef(typ Type) (Type, error) { - result := typ - for depth := 0; depth <= maxTypeDepth; depth++ { - switch v := (result).(type) { - case qualifier: - result = v.qualify() - case *Typedef: - result = v.Type - default: - return result, nil - } - } - return nil, errors.New("exceeded type depth") -} diff --git a/agent/vendor/github.com/cilium/ebpf/internal/btf/doc.go b/agent/vendor/github.com/cilium/ebpf/btf/doc.go similarity index 71% rename from agent/vendor/github.com/cilium/ebpf/internal/btf/doc.go rename to agent/vendor/github.com/cilium/ebpf/btf/doc.go index ad2576cb23c..b1f4b1fc3eb 100644 --- a/agent/vendor/github.com/cilium/ebpf/internal/btf/doc.go +++ b/agent/vendor/github.com/cilium/ebpf/btf/doc.go @@ -2,7 +2,4 @@ // // The canonical documentation lives in the Linux kernel repository and is // available at https://www.kernel.org/doc/html/latest/bpf/btf.html -// -// The API is very much unstable. You should only use this via the main -// ebpf library. 
package btf diff --git a/agent/vendor/github.com/cilium/ebpf/btf/ext_info.go b/agent/vendor/github.com/cilium/ebpf/btf/ext_info.go new file mode 100644 index 00000000000..2c0e1afe299 --- /dev/null +++ b/agent/vendor/github.com/cilium/ebpf/btf/ext_info.go @@ -0,0 +1,721 @@ +package btf + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "sort" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" +) + +// ExtInfos contains ELF section metadata. +type ExtInfos struct { + // The slices are sorted by offset in ascending order. + funcInfos map[string][]funcInfo + lineInfos map[string][]lineInfo + relocationInfos map[string][]coreRelocationInfo +} + +// loadExtInfosFromELF parses ext infos from the .BTF.ext section in an ELF. +// +// Returns an error wrapping ErrNotFound if no ext infos are present. +func loadExtInfosFromELF(file *internal.SafeELFFile, ts types, strings *stringTable) (*ExtInfos, error) { + section := file.Section(".BTF.ext") + if section == nil { + return nil, fmt.Errorf("btf ext infos: %w", ErrNotFound) + } + + if section.ReaderAt == nil { + return nil, fmt.Errorf("compressed ext_info is not supported") + } + + return loadExtInfos(section.ReaderAt, file.ByteOrder, ts, strings) +} + +// loadExtInfos parses bare ext infos. +func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, ts types, strings *stringTable) (*ExtInfos, error) { + // Open unbuffered section reader. binary.Read() calls io.ReadFull on + // the header structs, resulting in one syscall per header. + headerRd := io.NewSectionReader(r, 0, math.MaxInt64) + extHeader, err := parseBTFExtHeader(headerRd, bo) + if err != nil { + return nil, fmt.Errorf("parsing BTF extension header: %w", err) + } + + coreHeader, err := parseBTFExtCOREHeader(headerRd, bo, extHeader) + if err != nil { + return nil, fmt.Errorf("parsing BTF CO-RE header: %w", err) + } + + buf := internal.NewBufferedSectionReader(r, extHeader.funcInfoStart(), int64(extHeader.FuncInfoLen)) + btfFuncInfos, err := parseFuncInfos(buf, bo, strings) + if err != nil { + return nil, fmt.Errorf("parsing BTF function info: %w", err) + } + + funcInfos := make(map[string][]funcInfo, len(btfFuncInfos)) + for section, bfis := range btfFuncInfos { + funcInfos[section], err = newFuncInfos(bfis, ts) + if err != nil { + return nil, fmt.Errorf("section %s: func infos: %w", section, err) + } + } + + buf = internal.NewBufferedSectionReader(r, extHeader.lineInfoStart(), int64(extHeader.LineInfoLen)) + btfLineInfos, err := parseLineInfos(buf, bo, strings) + if err != nil { + return nil, fmt.Errorf("parsing BTF line info: %w", err) + } + + lineInfos := make(map[string][]lineInfo, len(btfLineInfos)) + for section, blis := range btfLineInfos { + lineInfos[section], err = newLineInfos(blis, strings) + if err != nil { + return nil, fmt.Errorf("section %s: line infos: %w", section, err) + } + } + + if coreHeader == nil || coreHeader.COREReloLen == 0 { + return &ExtInfos{funcInfos, lineInfos, nil}, nil + } + + var btfCORERelos map[string][]bpfCORERelo + buf = internal.NewBufferedSectionReader(r, extHeader.coreReloStart(coreHeader), int64(coreHeader.COREReloLen)) + btfCORERelos, err = parseCORERelos(buf, bo, strings) + if err != nil { + return nil, fmt.Errorf("parsing CO-RE relocation info: %w", err) + } + + coreRelos := make(map[string][]coreRelocationInfo, len(btfCORERelos)) + for section, brs := range btfCORERelos { + coreRelos[section], err = newRelocationInfos(brs, ts, strings) + if err != nil { + return nil, fmt.Errorf("section %s: CO-RE 
relocations: %w", section, err) + } + } + + return &ExtInfos{funcInfos, lineInfos, coreRelos}, nil +} + +type funcInfoMeta struct{} +type coreRelocationMeta struct{} + +// Assign per-section metadata from BTF to a section's instructions. +func (ei *ExtInfos) Assign(insns asm.Instructions, section string) { + funcInfos := ei.funcInfos[section] + lineInfos := ei.lineInfos[section] + reloInfos := ei.relocationInfos[section] + + iter := insns.Iterate() + for iter.Next() { + if len(funcInfos) > 0 && funcInfos[0].offset == iter.Offset { + iter.Ins.Metadata.Set(funcInfoMeta{}, funcInfos[0].fn) + funcInfos = funcInfos[1:] + } + + if len(lineInfos) > 0 && lineInfos[0].offset == iter.Offset { + *iter.Ins = iter.Ins.WithSource(lineInfos[0].line) + lineInfos = lineInfos[1:] + } + + if len(reloInfos) > 0 && reloInfos[0].offset == iter.Offset { + iter.Ins.Metadata.Set(coreRelocationMeta{}, reloInfos[0].relo) + reloInfos = reloInfos[1:] + } + } +} + +// MarshalExtInfos encodes function and line info embedded in insns into kernel +// wire format. +func MarshalExtInfos(insns asm.Instructions, typeID func(Type) (TypeID, error)) (funcInfos, lineInfos []byte, _ error) { + iter := insns.Iterate() + var fiBuf, liBuf bytes.Buffer + for iter.Next() { + if fn := FuncMetadata(iter.Ins); fn != nil { + fi := &funcInfo{ + fn: fn, + offset: iter.Offset, + } + if err := fi.marshal(&fiBuf, typeID); err != nil { + return nil, nil, fmt.Errorf("write func info: %w", err) + } + } + + if line, ok := iter.Ins.Source().(*Line); ok { + li := &lineInfo{ + line: line, + offset: iter.Offset, + } + if err := li.marshal(&liBuf); err != nil { + return nil, nil, fmt.Errorf("write line info: %w", err) + } + } + } + return fiBuf.Bytes(), liBuf.Bytes(), nil +} + +// btfExtHeader is found at the start of the .BTF.ext section. +type btfExtHeader struct { + Magic uint16 + Version uint8 + Flags uint8 + + // HdrLen is larger than the size of struct btfExtHeader when it is + // immediately followed by a btfExtCOREHeader. + HdrLen uint32 + + FuncInfoOff uint32 + FuncInfoLen uint32 + LineInfoOff uint32 + LineInfoLen uint32 +} + +// parseBTFExtHeader parses the header of the .BTF.ext section. +func parseBTFExtHeader(r io.Reader, bo binary.ByteOrder) (*btfExtHeader, error) { + var header btfExtHeader + if err := binary.Read(r, bo, &header); err != nil { + return nil, fmt.Errorf("can't read header: %v", err) + } + + if header.Magic != btfMagic { + return nil, fmt.Errorf("incorrect magic value %v", header.Magic) + } + + if header.Version != 1 { + return nil, fmt.Errorf("unexpected version %v", header.Version) + } + + if header.Flags != 0 { + return nil, fmt.Errorf("unsupported flags %v", header.Flags) + } + + if int64(header.HdrLen) < int64(binary.Size(&header)) { + return nil, fmt.Errorf("header length shorter than btfExtHeader size") + } + + return &header, nil +} + +// funcInfoStart returns the offset from the beginning of the .BTF.ext section +// to the start of its func_info entries. +func (h *btfExtHeader) funcInfoStart() int64 { + return int64(h.HdrLen + h.FuncInfoOff) +} + +// lineInfoStart returns the offset from the beginning of the .BTF.ext section +// to the start of its line_info entries. +func (h *btfExtHeader) lineInfoStart() int64 { + return int64(h.HdrLen + h.LineInfoOff) +} + +// coreReloStart returns the offset from the beginning of the .BTF.ext section +// to the start of its CO-RE relocation entries. 
+func (h *btfExtHeader) coreReloStart(ch *btfExtCOREHeader) int64 { + return int64(h.HdrLen + ch.COREReloOff) +} + +// btfExtCOREHeader is found right after the btfExtHeader when its HdrLen +// field is larger than its size. +type btfExtCOREHeader struct { + COREReloOff uint32 + COREReloLen uint32 +} + +// parseBTFExtCOREHeader parses the tail of the .BTF.ext header. If additional +// header bytes are present, extHeader.HdrLen will be larger than the struct, +// indicating the presence of a CO-RE extension header. +func parseBTFExtCOREHeader(r io.Reader, bo binary.ByteOrder, extHeader *btfExtHeader) (*btfExtCOREHeader, error) { + extHdrSize := int64(binary.Size(&extHeader)) + remainder := int64(extHeader.HdrLen) - extHdrSize + + if remainder == 0 { + return nil, nil + } + + var coreHeader btfExtCOREHeader + if err := binary.Read(r, bo, &coreHeader); err != nil { + return nil, fmt.Errorf("can't read header: %v", err) + } + + return &coreHeader, nil +} + +type btfExtInfoSec struct { + SecNameOff uint32 + NumInfo uint32 +} + +// parseExtInfoSec parses a btf_ext_info_sec header within .BTF.ext, +// appearing within func_info and line_info sub-sections. +// These headers appear once for each program section in the ELF and are +// followed by one or more func/line_info records for the section. +func parseExtInfoSec(r io.Reader, bo binary.ByteOrder, strings *stringTable) (string, *btfExtInfoSec, error) { + var infoHeader btfExtInfoSec + if err := binary.Read(r, bo, &infoHeader); err != nil { + return "", nil, fmt.Errorf("read ext info header: %w", err) + } + + secName, err := strings.Lookup(infoHeader.SecNameOff) + if err != nil { + return "", nil, fmt.Errorf("get section name: %w", err) + } + if secName == "" { + return "", nil, fmt.Errorf("extinfo header refers to empty section name") + } + + if infoHeader.NumInfo == 0 { + return "", nil, fmt.Errorf("section %s has zero records", secName) + } + + return secName, &infoHeader, nil +} + +// parseExtInfoRecordSize parses the uint32 at the beginning of a func_infos +// or line_infos segment that describes the length of all extInfoRecords in +// that segment. +func parseExtInfoRecordSize(r io.Reader, bo binary.ByteOrder) (uint32, error) { + const maxRecordSize = 256 + + var recordSize uint32 + if err := binary.Read(r, bo, &recordSize); err != nil { + return 0, fmt.Errorf("can't read record size: %v", err) + } + + if recordSize < 4 { + // Need at least InsnOff worth of bytes per record. + return 0, errors.New("record size too short") + } + if recordSize > maxRecordSize { + return 0, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize) + } + + return recordSize, nil +} + +// The size of a FuncInfo in BTF wire format. +var FuncInfoSize = uint32(binary.Size(bpfFuncInfo{})) + +type funcInfo struct { + fn *Func + offset asm.RawInstructionOffset +} + +type bpfFuncInfo struct { + // Instruction offset of the function within an ELF section. + InsnOff uint32 + TypeID TypeID +} + +func newFuncInfo(fi bpfFuncInfo, ts types) (*funcInfo, error) { + typ, err := ts.ByID(fi.TypeID) + if err != nil { + return nil, err + } + + fn, ok := typ.(*Func) + if !ok { + return nil, fmt.Errorf("type ID %d is a %T, but expected a Func", fi.TypeID, typ) + } + + // C doesn't have anonymous functions, but check just in case. 
+ if fn.Name == "" { + return nil, fmt.Errorf("func with type ID %d doesn't have a name", fi.TypeID) + } + + return &funcInfo{ + fn, + asm.RawInstructionOffset(fi.InsnOff), + }, nil +} + +func newFuncInfos(bfis []bpfFuncInfo, ts types) ([]funcInfo, error) { + fis := make([]funcInfo, 0, len(bfis)) + for _, bfi := range bfis { + fi, err := newFuncInfo(bfi, ts) + if err != nil { + return nil, fmt.Errorf("offset %d: %w", bfi.InsnOff, err) + } + fis = append(fis, *fi) + } + sort.Slice(fis, func(i, j int) bool { + return fis[i].offset <= fis[j].offset + }) + return fis, nil +} + +// marshal into the BTF wire format. +func (fi *funcInfo) marshal(w io.Writer, typeID func(Type) (TypeID, error)) error { + id, err := typeID(fi.fn) + if err != nil { + return err + } + bfi := bpfFuncInfo{ + InsnOff: uint32(fi.offset), + TypeID: id, + } + return binary.Write(w, internal.NativeEndian, &bfi) +} + +// parseLineInfos parses a func_info sub-section within .BTF.ext ito a map of +// func infos indexed by section name. +func parseFuncInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfFuncInfo, error) { + recordSize, err := parseExtInfoRecordSize(r, bo) + if err != nil { + return nil, err + } + + result := make(map[string][]bpfFuncInfo) + for { + secName, infoHeader, err := parseExtInfoSec(r, bo, strings) + if errors.Is(err, io.EOF) { + return result, nil + } + if err != nil { + return nil, err + } + + records, err := parseFuncInfoRecords(r, bo, recordSize, infoHeader.NumInfo) + if err != nil { + return nil, fmt.Errorf("section %v: %w", secName, err) + } + + result[secName] = records + } +} + +// parseFuncInfoRecords parses a stream of func_infos into a funcInfos. +// These records appear after a btf_ext_info_sec header in the func_info +// sub-section of .BTF.ext. +func parseFuncInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfFuncInfo, error) { + var out []bpfFuncInfo + var fi bpfFuncInfo + + if exp, got := FuncInfoSize, recordSize; exp != got { + // BTF blob's record size is longer than we know how to parse. + return nil, fmt.Errorf("expected FuncInfo record size %d, but BTF blob contains %d", exp, got) + } + + for i := uint32(0); i < recordNum; i++ { + if err := binary.Read(r, bo, &fi); err != nil { + return nil, fmt.Errorf("can't read function info: %v", err) + } + + if fi.InsnOff%asm.InstructionSize != 0 { + return nil, fmt.Errorf("offset %v is not aligned with instruction size", fi.InsnOff) + } + + // ELF tracks offset in bytes, the kernel expects raw BPF instructions. + // Convert as early as possible. + fi.InsnOff /= asm.InstructionSize + + out = append(out, fi) + } + + return out, nil +} + +var LineInfoSize = uint32(binary.Size(bpfLineInfo{})) + +// Line represents the location and contents of a single line of source +// code a BPF ELF was compiled from. +type Line struct { + fileName string + line string + lineNumber uint32 + lineColumn uint32 + + // TODO: We should get rid of the fields below, but for that we need to be + // able to write BTF. + + fileNameOff uint32 + lineOff uint32 +} + +func (li *Line) FileName() string { + return li.fileName +} + +func (li *Line) Line() string { + return li.line +} + +func (li *Line) LineNumber() uint32 { + return li.lineNumber +} + +func (li *Line) LineColumn() uint32 { + return li.lineColumn +} + +func (li *Line) String() string { + return li.line +} + +type lineInfo struct { + line *Line + offset asm.RawInstructionOffset +} + +// Constants for the format of bpfLineInfo.LineCol. 
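The LineCol word described by the constants just below packs the line number into the upper 22 bits and the column into the lower 10, which is how newLineInfo and lineInfo.marshal further down round-trip it. A tiny sketch with assumed values:

package btfexample

// Assumed example: line 42, column 7, packed the way lineInfo.marshal does it.
const packedLineCol = uint32(42)<<10 | uint32(7)

// splitLineCol mirrors the unpacking in newLineInfo.
func splitLineCol(lineCol uint32) (line, column uint32) {
	return lineCol >> 10, lineCol & ((1 << 10) - 1)
}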
+const ( + bpfLineShift = 10 + bpfLineMax = (1 << (32 - bpfLineShift)) - 1 + bpfColumnMax = (1 << bpfLineShift) - 1 +) + +type bpfLineInfo struct { + // Instruction offset of the line within the whole instruction stream, in instructions. + InsnOff uint32 + FileNameOff uint32 + LineOff uint32 + LineCol uint32 +} + +func newLineInfo(li bpfLineInfo, strings *stringTable) (*lineInfo, error) { + line, err := strings.Lookup(li.LineOff) + if err != nil { + return nil, fmt.Errorf("lookup of line: %w", err) + } + + fileName, err := strings.Lookup(li.FileNameOff) + if err != nil { + return nil, fmt.Errorf("lookup of filename: %w", err) + } + + lineNumber := li.LineCol >> bpfLineShift + lineColumn := li.LineCol & bpfColumnMax + + return &lineInfo{ + &Line{ + fileName, + line, + lineNumber, + lineColumn, + li.FileNameOff, + li.LineOff, + }, + asm.RawInstructionOffset(li.InsnOff), + }, nil +} + +func newLineInfos(blis []bpfLineInfo, strings *stringTable) ([]lineInfo, error) { + lis := make([]lineInfo, 0, len(blis)) + for _, bli := range blis { + li, err := newLineInfo(bli, strings) + if err != nil { + return nil, fmt.Errorf("offset %d: %w", bli.InsnOff, err) + } + lis = append(lis, *li) + } + sort.Slice(lis, func(i, j int) bool { + return lis[i].offset <= lis[j].offset + }) + return lis, nil +} + +// marshal writes the binary representation of the LineInfo to w. +func (li *lineInfo) marshal(w io.Writer) error { + line := li.line + if line.lineNumber > bpfLineMax { + return fmt.Errorf("line %d exceeds %d", line.lineNumber, bpfLineMax) + } + + if line.lineColumn > bpfColumnMax { + return fmt.Errorf("column %d exceeds %d", line.lineColumn, bpfColumnMax) + } + + bli := bpfLineInfo{ + uint32(li.offset), + line.fileNameOff, + line.lineOff, + (line.lineNumber << bpfLineShift) | line.lineColumn, + } + return binary.Write(w, internal.NativeEndian, &bli) +} + +// parseLineInfos parses a line_info sub-section within .BTF.ext ito a map of +// line infos indexed by section name. +func parseLineInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfLineInfo, error) { + recordSize, err := parseExtInfoRecordSize(r, bo) + if err != nil { + return nil, err + } + + result := make(map[string][]bpfLineInfo) + for { + secName, infoHeader, err := parseExtInfoSec(r, bo, strings) + if errors.Is(err, io.EOF) { + return result, nil + } + if err != nil { + return nil, err + } + + records, err := parseLineInfoRecords(r, bo, recordSize, infoHeader.NumInfo) + if err != nil { + return nil, fmt.Errorf("section %v: %w", secName, err) + } + + result[secName] = records + } +} + +// parseLineInfoRecords parses a stream of line_infos into a lineInfos. +// These records appear after a btf_ext_info_sec header in the line_info +// sub-section of .BTF.ext. +func parseLineInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfLineInfo, error) { + var out []bpfLineInfo + var li bpfLineInfo + + if exp, got := uint32(binary.Size(li)), recordSize; exp != got { + // BTF blob's record size is longer than we know how to parse. + return nil, fmt.Errorf("expected LineInfo record size %d, but BTF blob contains %d", exp, got) + } + + for i := uint32(0); i < recordNum; i++ { + if err := binary.Read(r, bo, &li); err != nil { + return nil, fmt.Errorf("can't read line info: %v", err) + } + + if li.InsnOff%asm.InstructionSize != 0 { + return nil, fmt.Errorf("offset %v is not aligned with instruction size", li.InsnOff) + } + + // ELF tracks offset in bytes, the kernel expects raw BPF instructions. 
+ // Convert as early as possible. + li.InsnOff /= asm.InstructionSize + + out = append(out, li) + } + + return out, nil +} + +// bpfCORERelo matches the kernel's struct bpf_core_relo. +type bpfCORERelo struct { + InsnOff uint32 + TypeID TypeID + AccessStrOff uint32 + Kind coreKind +} + +type CORERelocation struct { + typ Type + accessor coreAccessor + kind coreKind +} + +func CORERelocationMetadata(ins *asm.Instruction) *CORERelocation { + relo, _ := ins.Metadata.Get(coreRelocationMeta{}).(*CORERelocation) + return relo +} + +type coreRelocationInfo struct { + relo *CORERelocation + offset asm.RawInstructionOffset +} + +func newRelocationInfo(relo bpfCORERelo, ts types, strings *stringTable) (*coreRelocationInfo, error) { + typ, err := ts.ByID(relo.TypeID) + if err != nil { + return nil, err + } + + accessorStr, err := strings.Lookup(relo.AccessStrOff) + if err != nil { + return nil, err + } + + accessor, err := parseCOREAccessor(accessorStr) + if err != nil { + return nil, fmt.Errorf("accessor %q: %s", accessorStr, err) + } + + return &coreRelocationInfo{ + &CORERelocation{ + typ, + accessor, + relo.Kind, + }, + asm.RawInstructionOffset(relo.InsnOff), + }, nil +} + +func newRelocationInfos(brs []bpfCORERelo, ts types, strings *stringTable) ([]coreRelocationInfo, error) { + rs := make([]coreRelocationInfo, 0, len(brs)) + for _, br := range brs { + relo, err := newRelocationInfo(br, ts, strings) + if err != nil { + return nil, fmt.Errorf("offset %d: %w", br.InsnOff, err) + } + rs = append(rs, *relo) + } + sort.Slice(rs, func(i, j int) bool { + return rs[i].offset < rs[j].offset + }) + return rs, nil +} + +var extInfoReloSize = binary.Size(bpfCORERelo{}) + +// parseCORERelos parses a core_relos sub-section within .BTF.ext ito a map of +// CO-RE relocations indexed by section name. +func parseCORERelos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfCORERelo, error) { + recordSize, err := parseExtInfoRecordSize(r, bo) + if err != nil { + return nil, err + } + + if recordSize != uint32(extInfoReloSize) { + return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize) + } + + result := make(map[string][]bpfCORERelo) + for { + secName, infoHeader, err := parseExtInfoSec(r, bo, strings) + if errors.Is(err, io.EOF) { + return result, nil + } + if err != nil { + return nil, err + } + + records, err := parseCOREReloRecords(r, bo, recordSize, infoHeader.NumInfo) + if err != nil { + return nil, fmt.Errorf("section %v: %w", secName, err) + } + + result[secName] = records + } +} + +// parseCOREReloRecords parses a stream of CO-RE relocation entries into a +// coreRelos. These records appear after a btf_ext_info_sec header in the +// core_relos sub-section of .BTF.ext. +func parseCOREReloRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfCORERelo, error) { + var out []bpfCORERelo + + var relo bpfCORERelo + for i := uint32(0); i < recordNum; i++ { + if err := binary.Read(r, bo, &relo); err != nil { + return nil, fmt.Errorf("can't read CO-RE relocation: %v", err) + } + + if relo.InsnOff%asm.InstructionSize != 0 { + return nil, fmt.Errorf("offset %v is not aligned with instruction size", relo.InsnOff) + } + + // ELF tracks offset in bytes, the kernel expects raw BPF instructions. + // Convert as early as possible. 
+ relo.InsnOff /= asm.InstructionSize + + out = append(out, relo) + } + + return out, nil +} diff --git a/agent/vendor/github.com/cilium/ebpf/btf/format.go b/agent/vendor/github.com/cilium/ebpf/btf/format.go new file mode 100644 index 00000000000..e7688a2a6e8 --- /dev/null +++ b/agent/vendor/github.com/cilium/ebpf/btf/format.go @@ -0,0 +1,319 @@ +package btf + +import ( + "errors" + "fmt" + "strings" +) + +var errNestedTooDeep = errors.New("nested too deep") + +// GoFormatter converts a Type to Go syntax. +// +// A zero GoFormatter is valid to use. +type GoFormatter struct { + w strings.Builder + + // Types present in this map are referred to using the given name if they + // are encountered when outputting another type. + Names map[Type]string + + // Identifier is called for each field of struct-like types. By default the + // field name is used as is. + Identifier func(string) string + + // EnumIdentifier is called for each element of an enum. By default the + // name of the enum type is concatenated with Identifier(element). + EnumIdentifier func(name, element string) string +} + +// TypeDeclaration generates a Go type declaration for a BTF type. +func (gf *GoFormatter) TypeDeclaration(name string, typ Type) (string, error) { + gf.w.Reset() + if err := gf.writeTypeDecl(name, typ); err != nil { + return "", err + } + return gf.w.String(), nil +} + +func (gf *GoFormatter) identifier(s string) string { + if gf.Identifier != nil { + return gf.Identifier(s) + } + + return s +} + +func (gf *GoFormatter) enumIdentifier(name, element string) string { + if gf.EnumIdentifier != nil { + return gf.EnumIdentifier(name, element) + } + + return name + gf.identifier(element) +} + +// writeTypeDecl outputs a declaration of the given type. +// +// It encodes https://golang.org/ref/spec#Type_declarations: +// +// type foo struct { bar uint32; } +// type bar int32 +func (gf *GoFormatter) writeTypeDecl(name string, typ Type) error { + if name == "" { + return fmt.Errorf("need a name for type %s", typ) + } + + switch v := skipQualifiers(typ).(type) { + case *Enum: + fmt.Fprintf(&gf.w, "type %s ", name) + switch v.Size { + case 1: + gf.w.WriteString("int8") + case 2: + gf.w.WriteString("int16") + case 4: + gf.w.WriteString("int32") + case 8: + gf.w.WriteString("int64") + default: + return fmt.Errorf("%s: invalid enum size %d", typ, v.Size) + } + + if len(v.Values) == 0 { + return nil + } + + gf.w.WriteString("; const ( ") + for _, ev := range v.Values { + id := gf.enumIdentifier(name, ev.Name) + fmt.Fprintf(&gf.w, "%s %s = %d; ", id, name, ev.Value) + } + gf.w.WriteString(")") + + return nil + + default: + fmt.Fprintf(&gf.w, "type %s ", name) + return gf.writeTypeLit(v, 0) + } +} + +// writeType outputs the name of a named type or a literal describing the type. +// +// It encodes https://golang.org/ref/spec#Types. +// +// foo (if foo is a named type) +// uint32 +func (gf *GoFormatter) writeType(typ Type, depth int) error { + typ = skipQualifiers(typ) + + name := gf.Names[typ] + if name != "" { + gf.w.WriteString(name) + return nil + } + + return gf.writeTypeLit(typ, depth) +} + +// writeTypeLit outputs a literal describing the type. +// +// The function ignores named types. +// +// It encodes https://golang.org/ref/spec#TypeLit. 
+// +// struct { bar uint32; } +// uint32 +func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error { + depth++ + if depth > maxTypeDepth { + return errNestedTooDeep + } + + var err error + switch v := skipQualifiers(typ).(type) { + case *Int: + gf.writeIntLit(v) + + case *Enum: + gf.w.WriteString("int32") + + case *Typedef: + err = gf.writeType(v.Type, depth) + + case *Array: + fmt.Fprintf(&gf.w, "[%d]", v.Nelems) + err = gf.writeType(v.Type, depth) + + case *Struct: + err = gf.writeStructLit(v.Size, v.Members, depth) + + case *Union: + // Always choose the first member to represent the union in Go. + err = gf.writeStructLit(v.Size, v.Members[:1], depth) + + case *Datasec: + err = gf.writeDatasecLit(v, depth) + + default: + return fmt.Errorf("type %T: %w", v, ErrNotSupported) + } + + if err != nil { + return fmt.Errorf("%s: %w", typ, err) + } + + return nil +} + +func (gf *GoFormatter) writeIntLit(i *Int) { + // NB: Encoding.IsChar is ignored. + if i.Encoding.IsBool() && i.Size == 1 { + gf.w.WriteString("bool") + return + } + + bits := i.Size * 8 + if i.Encoding.IsSigned() { + fmt.Fprintf(&gf.w, "int%d", bits) + } else { + fmt.Fprintf(&gf.w, "uint%d", bits) + } +} + +func (gf *GoFormatter) writeStructLit(size uint32, members []Member, depth int) error { + gf.w.WriteString("struct { ") + + prevOffset := uint32(0) + skippedBitfield := false + for i, m := range members { + if m.BitfieldSize > 0 { + skippedBitfield = true + continue + } + + offset := m.Offset.Bytes() + if n := offset - prevOffset; skippedBitfield && n > 0 { + fmt.Fprintf(&gf.w, "_ [%d]byte /* unsupported bitfield */; ", n) + } else { + gf.writePadding(n) + } + + size, err := Sizeof(m.Type) + if err != nil { + return fmt.Errorf("field %d: %w", i, err) + } + prevOffset = offset + uint32(size) + + if err := gf.writeStructField(m, depth); err != nil { + return fmt.Errorf("field %d: %w", i, err) + } + } + + gf.writePadding(size - prevOffset) + gf.w.WriteString("}") + return nil +} + +func (gf *GoFormatter) writeStructField(m Member, depth int) error { + if m.BitfieldSize > 0 { + return fmt.Errorf("bitfields are not supported") + } + if m.Offset%8 != 0 { + return fmt.Errorf("unsupported offset %d", m.Offset) + } + + if m.Name == "" { + // Special case a nested anonymous union like + // struct foo { union { int bar; int baz }; } + // by replacing the whole union with its first member. + union, ok := m.Type.(*Union) + if !ok { + return fmt.Errorf("anonymous fields are not supported") + + } + + if len(union.Members) == 0 { + return errors.New("empty anonymous union") + } + + depth++ + if depth > maxTypeDepth { + return errNestedTooDeep + } + + m := union.Members[0] + size, err := Sizeof(m.Type) + if err != nil { + return err + } + + if err := gf.writeStructField(m, depth); err != nil { + return err + } + + gf.writePadding(union.Size - uint32(size)) + return nil + + } + + fmt.Fprintf(&gf.w, "%s ", gf.identifier(m.Name)) + + if err := gf.writeType(m.Type, depth); err != nil { + return err + } + + gf.w.WriteString("; ") + return nil +} + +func (gf *GoFormatter) writeDatasecLit(ds *Datasec, depth int) error { + gf.w.WriteString("struct { ") + + prevOffset := uint32(0) + for i, vsi := range ds.Vars { + v := vsi.Type.(*Var) + if v.Linkage != GlobalVar { + // Ignore static, extern, etc. for now. 
+ continue + } + + if v.Name == "" { + return fmt.Errorf("variable %d: empty name", i) + } + + gf.writePadding(vsi.Offset - prevOffset) + prevOffset = vsi.Offset + vsi.Size + + fmt.Fprintf(&gf.w, "%s ", gf.identifier(v.Name)) + + if err := gf.writeType(v.Type, depth); err != nil { + return fmt.Errorf("variable %d: %w", i, err) + } + + gf.w.WriteString("; ") + } + + gf.writePadding(ds.Size - prevOffset) + gf.w.WriteString("}") + return nil +} + +func (gf *GoFormatter) writePadding(bytes uint32) { + if bytes > 0 { + fmt.Fprintf(&gf.w, "_ [%d]byte; ", bytes) + } +} + +func skipQualifiers(typ Type) Type { + result := typ + for depth := 0; depth <= maxTypeDepth; depth++ { + switch v := (result).(type) { + case qualifier: + result = v.qualify() + default: + return result + } + } + return &cycle{typ} +} diff --git a/agent/vendor/github.com/cilium/ebpf/btf/handle.go b/agent/vendor/github.com/cilium/ebpf/btf/handle.go new file mode 100644 index 00000000000..128e9b35cf3 --- /dev/null +++ b/agent/vendor/github.com/cilium/ebpf/btf/handle.go @@ -0,0 +1,121 @@ +package btf + +import ( + "errors" + "fmt" + "os" + + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// HandleInfo describes a Handle. +type HandleInfo struct { + // ID of this handle in the kernel. The ID is only valid as long as the + // associated handle is kept alive. + ID ID + + // Name is an identifying name for the BTF, currently only used by the + // kernel. + Name string + + // IsKernel is true if the BTF originated with the kernel and not + // userspace. + IsKernel bool + + // Size of the raw BTF in bytes. + size uint32 +} + +func newHandleInfoFromFD(fd *sys.FD) (*HandleInfo, error) { + // We invoke the syscall once with an empty BTF and name buffers to get size + // information to allocate buffers. Then we invoke it a second time with + // buffers to receive the data. + var btfInfo sys.BtfInfo + if err := sys.ObjInfo(fd, &btfInfo); err != nil { + return nil, fmt.Errorf("get BTF info for fd %s: %w", fd, err) + } + + if btfInfo.NameLen > 0 { + // NameLen doesn't account for the terminating NUL. + btfInfo.NameLen++ + } + + // Don't pull raw BTF by default, since it may be quite large. + btfSize := btfInfo.BtfSize + btfInfo.BtfSize = 0 + + nameBuffer := make([]byte, btfInfo.NameLen) + btfInfo.Name, btfInfo.NameLen = sys.NewSlicePointerLen(nameBuffer) + if err := sys.ObjInfo(fd, &btfInfo); err != nil { + return nil, err + } + + return &HandleInfo{ + ID: ID(btfInfo.Id), + Name: unix.ByteSliceToString(nameBuffer), + IsKernel: btfInfo.KernelBtf != 0, + size: btfSize, + }, nil +} + +// IsVmlinux returns true if the BTF is for the kernel itself. +func (i *HandleInfo) IsVmlinux() bool { + return i.IsKernel && i.Name == "vmlinux" +} + +// IsModule returns true if the BTF is for a kernel module. +func (i *HandleInfo) IsModule() bool { + return i.IsKernel && i.Name != "vmlinux" +} + +// HandleIterator allows enumerating BTF blobs loaded into the kernel. +type HandleIterator struct { + // The ID of the last retrieved handle. Only valid after a call to Next. + ID ID + err error +} + +// Next retrieves a handle for the next BTF blob. +// +// [Handle.Close] is called if *handle is non-nil to avoid leaking fds. +// +// Returns true if another BTF blob was found. Call [HandleIterator.Err] after +// the function returns false.
+func (it *HandleIterator) Next(handle **Handle) bool { + if *handle != nil { + (*handle).Close() + *handle = nil + } + + id := it.ID + for { + attr := &sys.BtfGetNextIdAttr{Id: id} + err := sys.BtfGetNextId(attr) + if errors.Is(err, os.ErrNotExist) { + // There are no more BTF objects. + return false + } else if err != nil { + it.err = fmt.Errorf("get next BTF ID: %w", err) + return false + } + + id = attr.NextId + *handle, err = NewHandleFromID(id) + if errors.Is(err, os.ErrNotExist) { + // Try again with the next ID. + continue + } else if err != nil { + it.err = fmt.Errorf("retrieve handle for ID %d: %w", id, err) + return false + } + + it.ID = id + return true + } +} + +// Err returns an error if iteration failed for some reason. +func (it *HandleIterator) Err() error { + return it.err +} diff --git a/agent/vendor/github.com/cilium/ebpf/btf/strings.go b/agent/vendor/github.com/cilium/ebpf/btf/strings.go new file mode 100644 index 00000000000..67626e0dd17 --- /dev/null +++ b/agent/vendor/github.com/cilium/ebpf/btf/strings.go @@ -0,0 +1,128 @@ +package btf + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" +) + +type stringTable struct { + base *stringTable + offsets []uint32 + strings []string +} + +// sizedReader is implemented by bytes.Reader, io.SectionReader, strings.Reader, etc. +type sizedReader interface { + io.Reader + Size() int64 +} + +func readStringTable(r sizedReader, base *stringTable) (*stringTable, error) { + // When parsing split BTF's string table, the first entry offset is derived + // from the last entry offset of the base BTF. + firstStringOffset := uint32(0) + if base != nil { + idx := len(base.offsets) - 1 + firstStringOffset = base.offsets[idx] + uint32(len(base.strings[idx])) + 1 + } + + // Derived from vmlinux BTF. 
+ const averageStringLength = 16 + + n := int(r.Size() / averageStringLength) + offsets := make([]uint32, 0, n) + strings := make([]string, 0, n) + + offset := firstStringOffset + scanner := bufio.NewScanner(r) + scanner.Split(splitNull) + for scanner.Scan() { + str := scanner.Text() + offsets = append(offsets, offset) + strings = append(strings, str) + offset += uint32(len(str)) + 1 + } + if err := scanner.Err(); err != nil { + return nil, err + } + + if len(strings) == 0 { + return nil, errors.New("string table is empty") + } + + if firstStringOffset == 0 && strings[0] != "" { + return nil, errors.New("first item in string table is non-empty") + } + + return &stringTable{base, offsets, strings}, nil +} + +func splitNull(data []byte, atEOF bool) (advance int, token []byte, err error) { + i := bytes.IndexByte(data, 0) + if i == -1 { + if atEOF && len(data) > 0 { + return 0, nil, errors.New("string table isn't null terminated") + } + return 0, nil, nil + } + + return i + 1, data[:i], nil +} + +func (st *stringTable) Lookup(offset uint32) (string, error) { + if st.base != nil && offset <= st.base.offsets[len(st.base.offsets)-1] { + return st.base.lookup(offset) + } + return st.lookup(offset) +} + +func (st *stringTable) lookup(offset uint32) (string, error) { + i := search(st.offsets, offset) + if i == len(st.offsets) || st.offsets[i] != offset { + return "", fmt.Errorf("offset %d isn't start of a string", offset) + } + + return st.strings[i], nil +} + +func (st *stringTable) Length() int { + last := len(st.offsets) - 1 + return int(st.offsets[last]) + len(st.strings[last]) + 1 +} + +func (st *stringTable) Marshal(w io.Writer) error { + for _, str := range st.strings { + _, err := io.WriteString(w, str) + if err != nil { + return err + } + _, err = w.Write([]byte{0}) + if err != nil { + return err + } + } + return nil +} + +// search is a copy of sort.Search specialised for uint32. +// +// Licensed under https://go.dev/LICENSE +func search(ints []uint32, needle uint32) int { + // Define f(-1) == false and f(n) == true. + // Invariant: f(i-1) == false, f(j) == true. + i, j := 0, len(ints) + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if !(ints[h] >= needle) { + i = h + 1 // preserves f(i-1) == false + } else { + j = h // preserves f(j) == true + } + } + // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. + return i +} diff --git a/agent/vendor/github.com/cilium/ebpf/btf/types.go b/agent/vendor/github.com/cilium/ebpf/btf/types.go new file mode 100644 index 00000000000..402a363c28a --- /dev/null +++ b/agent/vendor/github.com/cilium/ebpf/btf/types.go @@ -0,0 +1,1212 @@ +package btf + +import ( + "fmt" + "io" + "math" + "reflect" + "strings" + + "github.com/cilium/ebpf/asm" +) + +const maxTypeDepth = 32 + +// TypeID identifies a type in a BTF section. +type TypeID uint32 + +// Type represents a type described by BTF. +type Type interface { + // Type can be formatted using the %s and %v verbs. %s outputs only the + // identity of the type, without any detail. %v outputs additional detail. + // + // Use the '+' flag to include the address of the type. + // + // Use the width to specify how many levels of detail to output, for example + // %1v will output detail for the root type and a short description of its + // children. %2v would output details of the root type and its children + // as well as a short description of the grandchildren. 
+ fmt.Formatter + + // Name of the type, empty for anonymous types and types that cannot + // carry a name, like Void and Pointer. + TypeName() string + + // Make a copy of the type, without copying Type members. + copy() Type + + // Enumerate all nested Types. Repeated calls must visit nested + // types in the same order. + walk(*typeDeque) +} + +var ( + _ Type = (*Int)(nil) + _ Type = (*Struct)(nil) + _ Type = (*Union)(nil) + _ Type = (*Enum)(nil) + _ Type = (*Fwd)(nil) + _ Type = (*Func)(nil) + _ Type = (*Typedef)(nil) + _ Type = (*Var)(nil) + _ Type = (*Datasec)(nil) + _ Type = (*Float)(nil) +) + +// types is a list of Type. +// +// The order determines the ID of a type. +type types []Type + +func (ts types) ByID(id TypeID) (Type, error) { + if int(id) > len(ts) { + return nil, fmt.Errorf("type ID %d: %w", id, ErrNotFound) + } + return ts[id], nil +} + +// Void is the unit type of BTF. +type Void struct{} + +func (v *Void) Format(fs fmt.State, verb rune) { formatType(fs, verb, v) } +func (v *Void) TypeName() string { return "" } +func (v *Void) size() uint32 { return 0 } +func (v *Void) copy() Type { return (*Void)(nil) } +func (v *Void) walk(*typeDeque) {} + +type IntEncoding byte + +const ( + Signed IntEncoding = 1 << iota + Char + Bool +) + +func (ie IntEncoding) IsSigned() bool { + return ie&Signed != 0 +} + +func (ie IntEncoding) IsChar() bool { + return ie&Char != 0 +} + +func (ie IntEncoding) IsBool() bool { + return ie&Bool != 0 +} + +func (ie IntEncoding) String() string { + switch { + case ie.IsChar() && ie.IsSigned(): + return "char" + case ie.IsChar() && !ie.IsSigned(): + return "uchar" + case ie.IsBool(): + return "bool" + case ie.IsSigned(): + return "signed" + default: + return "unsigned" + } +} + +// Int is an integer of a given length. +// +// See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int +type Int struct { + Name string + + // The size of the integer in bytes. + Size uint32 + Encoding IntEncoding +} + +func (i *Int) Format(fs fmt.State, verb rune) { + formatType(fs, verb, i, i.Encoding, "size=", i.Size*8) +} + +func (i *Int) TypeName() string { return i.Name } +func (i *Int) size() uint32 { return i.Size } +func (i *Int) walk(*typeDeque) {} +func (i *Int) copy() Type { + cpy := *i + return &cpy +} + +// Pointer is a pointer to another type. +type Pointer struct { + Target Type +} + +func (p *Pointer) Format(fs fmt.State, verb rune) { + formatType(fs, verb, p, "target=", p.Target) +} + +func (p *Pointer) TypeName() string { return "" } +func (p *Pointer) size() uint32 { return 8 } +func (p *Pointer) walk(tdq *typeDeque) { tdq.push(&p.Target) } +func (p *Pointer) copy() Type { + cpy := *p + return &cpy +} + +// Array is an array with a fixed number of elements. +type Array struct { + Index Type + Type Type + Nelems uint32 +} + +func (arr *Array) Format(fs fmt.State, verb rune) { + formatType(fs, verb, arr, "index=", arr.Index, "type=", arr.Type, "n=", arr.Nelems) +} + +func (arr *Array) TypeName() string { return "" } + +func (arr *Array) walk(tdq *typeDeque) { + tdq.push(&arr.Index) + tdq.push(&arr.Type) +} + +func (arr *Array) copy() Type { + cpy := *arr + return &cpy +} + +// Struct is a compound type of consecutive members. 
+type Struct struct { + Name string + // The size of the struct including padding, in bytes + Size uint32 + Members []Member +} + +func (s *Struct) Format(fs fmt.State, verb rune) { + formatType(fs, verb, s, "fields=", len(s.Members)) +} + +func (s *Struct) TypeName() string { return s.Name } + +func (s *Struct) size() uint32 { return s.Size } + +func (s *Struct) walk(tdq *typeDeque) { + for i := range s.Members { + tdq.push(&s.Members[i].Type) + } +} + +func (s *Struct) copy() Type { + cpy := *s + cpy.Members = copyMembers(s.Members) + return &cpy +} + +func (s *Struct) members() []Member { + return s.Members +} + +// Union is a compound type where members occupy the same memory. +type Union struct { + Name string + // The size of the union including padding, in bytes. + Size uint32 + Members []Member +} + +func (u *Union) Format(fs fmt.State, verb rune) { + formatType(fs, verb, u, "fields=", len(u.Members)) +} + +func (u *Union) TypeName() string { return u.Name } + +func (u *Union) size() uint32 { return u.Size } + +func (u *Union) walk(tdq *typeDeque) { + for i := range u.Members { + tdq.push(&u.Members[i].Type) + } +} + +func (u *Union) copy() Type { + cpy := *u + cpy.Members = copyMembers(u.Members) + return &cpy +} + +func (u *Union) members() []Member { + return u.Members +} + +func copyMembers(orig []Member) []Member { + cpy := make([]Member, len(orig)) + copy(cpy, orig) + return cpy +} + +type composite interface { + members() []Member +} + +var ( + _ composite = (*Struct)(nil) + _ composite = (*Union)(nil) +) + +// A value in bits. +type Bits uint32 + +// Bytes converts a bit value into bytes. +func (b Bits) Bytes() uint32 { + return uint32(b / 8) +} + +// Member is part of a Struct or Union. +// +// It is not a valid Type. +type Member struct { + Name string + Type Type + Offset Bits + BitfieldSize Bits +} + +// Enum lists possible values. +type Enum struct { + Name string + // Size of the enum value in bytes. + Size uint32 + Values []EnumValue +} + +func (e *Enum) Format(fs fmt.State, verb rune) { + formatType(fs, verb, e, "size=", e.Size, "values=", len(e.Values)) +} + +func (e *Enum) TypeName() string { return e.Name } + +// EnumValue is part of an Enum +// +// Is is not a valid Type +type EnumValue struct { + Name string + Value int32 +} + +func (e *Enum) size() uint32 { return e.Size } +func (e *Enum) walk(*typeDeque) {} +func (e *Enum) copy() Type { + cpy := *e + cpy.Values = make([]EnumValue, len(e.Values)) + copy(cpy.Values, e.Values) + return &cpy +} + +// FwdKind is the type of forward declaration. +type FwdKind int + +// Valid types of forward declaration. +const ( + FwdStruct FwdKind = iota + FwdUnion +) + +func (fk FwdKind) String() string { + switch fk { + case FwdStruct: + return "struct" + case FwdUnion: + return "union" + default: + return fmt.Sprintf("%T(%d)", fk, int(fk)) + } +} + +// Fwd is a forward declaration of a Type. +type Fwd struct { + Name string + Kind FwdKind +} + +func (f *Fwd) Format(fs fmt.State, verb rune) { + formatType(fs, verb, f, f.Kind) +} + +func (f *Fwd) TypeName() string { return f.Name } + +func (f *Fwd) walk(*typeDeque) {} +func (f *Fwd) copy() Type { + cpy := *f + return &cpy +} + +// Typedef is an alias of a Type. 
+type Typedef struct { + Name string + Type Type +} + +func (td *Typedef) Format(fs fmt.State, verb rune) { + formatType(fs, verb, td, td.Type) +} + +func (td *Typedef) TypeName() string { return td.Name } + +func (td *Typedef) walk(tdq *typeDeque) { tdq.push(&td.Type) } +func (td *Typedef) copy() Type { + cpy := *td + return &cpy +} + +// Volatile is a qualifier. +type Volatile struct { + Type Type +} + +func (v *Volatile) Format(fs fmt.State, verb rune) { + formatType(fs, verb, v, v.Type) +} + +func (v *Volatile) TypeName() string { return "" } + +func (v *Volatile) qualify() Type { return v.Type } +func (v *Volatile) walk(tdq *typeDeque) { tdq.push(&v.Type) } +func (v *Volatile) copy() Type { + cpy := *v + return &cpy +} + +// Const is a qualifier. +type Const struct { + Type Type +} + +func (c *Const) Format(fs fmt.State, verb rune) { + formatType(fs, verb, c, c.Type) +} + +func (c *Const) TypeName() string { return "" } + +func (c *Const) qualify() Type { return c.Type } +func (c *Const) walk(tdq *typeDeque) { tdq.push(&c.Type) } +func (c *Const) copy() Type { + cpy := *c + return &cpy +} + +// Restrict is a qualifier. +type Restrict struct { + Type Type +} + +func (r *Restrict) Format(fs fmt.State, verb rune) { + formatType(fs, verb, r, r.Type) +} + +func (r *Restrict) TypeName() string { return "" } + +func (r *Restrict) qualify() Type { return r.Type } +func (r *Restrict) walk(tdq *typeDeque) { tdq.push(&r.Type) } +func (r *Restrict) copy() Type { + cpy := *r + return &cpy +} + +// Func is a function definition. +type Func struct { + Name string + Type Type + Linkage FuncLinkage +} + +func FuncMetadata(ins *asm.Instruction) *Func { + fn, _ := ins.Metadata.Get(funcInfoMeta{}).(*Func) + return fn +} + +func (f *Func) Format(fs fmt.State, verb rune) { + formatType(fs, verb, f, f.Linkage, "proto=", f.Type) +} + +func (f *Func) TypeName() string { return f.Name } + +func (f *Func) walk(tdq *typeDeque) { tdq.push(&f.Type) } +func (f *Func) copy() Type { + cpy := *f + return &cpy +} + +// FuncProto is a function declaration. +type FuncProto struct { + Return Type + Params []FuncParam +} + +func (fp *FuncProto) Format(fs fmt.State, verb rune) { + formatType(fs, verb, fp, "args=", len(fp.Params), "return=", fp.Return) +} + +func (fp *FuncProto) TypeName() string { return "" } + +func (fp *FuncProto) walk(tdq *typeDeque) { + tdq.push(&fp.Return) + for i := range fp.Params { + tdq.push(&fp.Params[i].Type) + } +} + +func (fp *FuncProto) copy() Type { + cpy := *fp + cpy.Params = make([]FuncParam, len(fp.Params)) + copy(cpy.Params, fp.Params) + return &cpy +} + +type FuncParam struct { + Name string + Type Type +} + +// Var is a global variable. +type Var struct { + Name string + Type Type + Linkage VarLinkage +} + +func (v *Var) Format(fs fmt.State, verb rune) { + formatType(fs, verb, v, v.Linkage) +} + +func (v *Var) TypeName() string { return v.Name } + +func (v *Var) walk(tdq *typeDeque) { tdq.push(&v.Type) } +func (v *Var) copy() Type { + cpy := *v + return &cpy +} + +// Datasec is a global program section containing data. 
+type Datasec struct { + Name string + Size uint32 + Vars []VarSecinfo +} + +func (ds *Datasec) Format(fs fmt.State, verb rune) { + formatType(fs, verb, ds) +} + +func (ds *Datasec) TypeName() string { return ds.Name } + +func (ds *Datasec) size() uint32 { return ds.Size } + +func (ds *Datasec) walk(tdq *typeDeque) { + for i := range ds.Vars { + tdq.push(&ds.Vars[i].Type) + } +} + +func (ds *Datasec) copy() Type { + cpy := *ds + cpy.Vars = make([]VarSecinfo, len(ds.Vars)) + copy(cpy.Vars, ds.Vars) + return &cpy +} + +// VarSecinfo describes variable in a Datasec. +// +// It is not a valid Type. +type VarSecinfo struct { + Type Type + Offset uint32 + Size uint32 +} + +// Float is a float of a given length. +type Float struct { + Name string + + // The size of the float in bytes. + Size uint32 +} + +func (f *Float) Format(fs fmt.State, verb rune) { + formatType(fs, verb, f, "size=", f.Size*8) +} + +func (f *Float) TypeName() string { return f.Name } +func (f *Float) size() uint32 { return f.Size } +func (f *Float) walk(*typeDeque) {} +func (f *Float) copy() Type { + cpy := *f + return &cpy +} + +// cycle is a type which had to be elided since it exceeded maxTypeDepth. +type cycle struct { + root Type +} + +func (c *cycle) ID() TypeID { return math.MaxUint32 } +func (c *cycle) Format(fs fmt.State, verb rune) { formatType(fs, verb, c, "root=", c.root) } +func (c *cycle) TypeName() string { return "" } +func (c *cycle) walk(*typeDeque) {} +func (c *cycle) copy() Type { + cpy := *c + return &cpy +} + +type sizer interface { + size() uint32 +} + +var ( + _ sizer = (*Int)(nil) + _ sizer = (*Pointer)(nil) + _ sizer = (*Struct)(nil) + _ sizer = (*Union)(nil) + _ sizer = (*Enum)(nil) + _ sizer = (*Datasec)(nil) +) + +type qualifier interface { + qualify() Type +} + +var ( + _ qualifier = (*Const)(nil) + _ qualifier = (*Restrict)(nil) + _ qualifier = (*Volatile)(nil) +) + +// Sizeof returns the size of a type in bytes. +// +// Returns an error if the size can't be computed. +func Sizeof(typ Type) (int, error) { + var ( + n = int64(1) + elem int64 + ) + + for i := 0; i < maxTypeDepth; i++ { + switch v := typ.(type) { + case *Array: + if n > 0 && int64(v.Nelems) > math.MaxInt64/n { + return 0, fmt.Errorf("type %s: overflow", typ) + } + + // Arrays may be of zero length, which allows + // n to be zero as well. + n *= int64(v.Nelems) + typ = v.Type + continue + + case sizer: + elem = int64(v.size()) + + case *Typedef: + typ = v.Type + continue + + case qualifier: + typ = v.qualify() + continue + + default: + return 0, fmt.Errorf("unsized type %T", typ) + } + + if n > 0 && elem > math.MaxInt64/n { + return 0, fmt.Errorf("type %s: overflow", typ) + } + + size := n * elem + if int64(int(size)) != size { + return 0, fmt.Errorf("type %s: overflow", typ) + } + + return int(size), nil + } + + return 0, fmt.Errorf("type %s: exceeded type depth", typ) +} + +// alignof returns the alignment of a type. +// +// Currently only supports the subset of types necessary for bitfield relocations. +func alignof(typ Type) (int, error) { + switch t := UnderlyingType(typ).(type) { + case *Enum: + return int(t.size()), nil + case *Int: + return int(t.Size), nil + default: + return 0, fmt.Errorf("can't calculate alignment of %T", t) + } +} + +// Transformer modifies a given Type and returns the result. +// +// For example, UnderlyingType removes any qualifiers or typedefs from a type. +// See the example on Copy for how to use a transform. +type Transformer func(Type) Type + +// Copy a Type recursively. 
+// +// typ may form a cycle. If transform is not nil, it is called with the +// to be copied type, and the returned value is copied instead. +func Copy(typ Type, transform Transformer) Type { + copies := make(copier) + copies.copy(&typ, transform) + return typ +} + +// copy a slice of Types recursively. +// +// See Copy for the semantics. +func copyTypes(types []Type, transform Transformer) []Type { + result := make([]Type, len(types)) + copy(result, types) + + copies := make(copier) + for i := range result { + copies.copy(&result[i], transform) + } + + return result +} + +type copier map[Type]Type + +func (c copier) copy(typ *Type, transform Transformer) { + var work typeDeque + for t := typ; t != nil; t = work.pop() { + // *t is the identity of the type. + if cpy := c[*t]; cpy != nil { + *t = cpy + continue + } + + var cpy Type + if transform != nil { + cpy = transform(*t).copy() + } else { + cpy = (*t).copy() + } + + c[*t] = cpy + *t = cpy + + // Mark any nested types for copying. + cpy.walk(&work) + } +} + +// typeDeque keeps track of pointers to types which still +// need to be visited. +type typeDeque struct { + types []*Type + read, write uint64 + mask uint64 +} + +func (dq *typeDeque) empty() bool { + return dq.read == dq.write +} + +// push adds a type to the stack. +func (dq *typeDeque) push(t *Type) { + if dq.write-dq.read < uint64(len(dq.types)) { + dq.types[dq.write&dq.mask] = t + dq.write++ + return + } + + new := len(dq.types) * 2 + if new == 0 { + new = 8 + } + + types := make([]*Type, new) + pivot := dq.read & dq.mask + n := copy(types, dq.types[pivot:]) + n += copy(types[n:], dq.types[:pivot]) + types[n] = t + + dq.types = types + dq.mask = uint64(new) - 1 + dq.read, dq.write = 0, uint64(n+1) +} + +// shift returns the first element or null. +func (dq *typeDeque) shift() *Type { + if dq.empty() { + return nil + } + + index := dq.read & dq.mask + t := dq.types[index] + dq.types[index] = nil + dq.read++ + return t +} + +// pop returns the last element or null. +func (dq *typeDeque) pop() *Type { + if dq.empty() { + return nil + } + + dq.write-- + index := dq.write & dq.mask + t := dq.types[index] + dq.types[index] = nil + return t +} + +// all returns all elements. +// +// The deque is empty after calling this method. +func (dq *typeDeque) all() []*Type { + length := dq.write - dq.read + types := make([]*Type, 0, length) + for t := dq.shift(); t != nil; t = dq.shift() { + types = append(types, t) + } + return types +} + +// inflateRawTypes takes a list of raw btf types linked via type IDs, and turns +// it into a graph of Types connected via pointers. +// +// If baseTypes are provided, then the raw types are +// considered to be of a split BTF (e.g., a kernel module). +// +// Returns a slice of types indexed by TypeID. Since BTF ignores compilation +// units, multiple types may share the same name. A Type may form a cyclic graph +// by pointing at itself. +func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTable) ([]Type, error) { + types := make([]Type, 0, len(rawTypes)+1) // +1 for Void added to base types + + typeIDOffset := TypeID(1) // Void is TypeID(0), so the rest starts from TypeID(1) + + if baseTypes == nil { + // Void is defined to always be type ID 0, and is thus omitted from BTF. 
+ types = append(types, (*Void)(nil)) + } else { + // For split BTF, the next ID is max base BTF type ID + 1 + typeIDOffset = TypeID(len(baseTypes)) + } + + type fixupDef struct { + id TypeID + typ *Type + } + + var fixups []fixupDef + fixup := func(id TypeID, typ *Type) { + if id < TypeID(len(baseTypes)) { + *typ = baseTypes[id] + return + } + + idx := id + if baseTypes != nil { + idx = id - TypeID(len(baseTypes)) + } + if idx < TypeID(len(types)) { + // We've already inflated this type, fix it up immediately. + *typ = types[idx] + return + } + fixups = append(fixups, fixupDef{id, typ}) + } + + type assertion struct { + typ *Type + want reflect.Type + } + + var assertions []assertion + assert := func(typ *Type, want reflect.Type) error { + if *typ != nil { + // The type has already been fixed up, check the type immediately. + if reflect.TypeOf(*typ) != want { + return fmt.Errorf("expected %s, got %T", want, *typ) + } + return nil + } + assertions = append(assertions, assertion{typ, want}) + return nil + } + + type bitfieldFixupDef struct { + id TypeID + m *Member + } + + var ( + legacyBitfields = make(map[TypeID][2]Bits) // offset, size + bitfieldFixups []bitfieldFixupDef + ) + convertMembers := func(raw []btfMember, kindFlag bool) ([]Member, error) { + // NB: The fixup below relies on pre-allocating this array to + // work, since otherwise append might re-allocate members. + members := make([]Member, 0, len(raw)) + for i, btfMember := range raw { + name, err := rawStrings.Lookup(btfMember.NameOff) + if err != nil { + return nil, fmt.Errorf("can't get name for member %d: %w", i, err) + } + + members = append(members, Member{ + Name: name, + Offset: Bits(btfMember.Offset), + }) + + m := &members[i] + fixup(raw[i].Type, &m.Type) + + if kindFlag { + m.BitfieldSize = Bits(btfMember.Offset >> 24) + m.Offset &= 0xffffff + // We ignore legacy bitfield definitions if the current composite + // is a new-style bitfield. This is kind of safe since offset and + // size on the type of the member must be zero if kindFlag is set + // according to spec. + continue + } + + // This may be a legacy bitfield, try to fix it up. + data, ok := legacyBitfields[raw[i].Type] + if ok { + // Bingo! + m.Offset += data[0] + m.BitfieldSize = data[1] + continue + } + + if m.Type != nil { + // We couldn't find a legacy bitfield, but we know that the member's + // type has already been inflated. Hence we know that it can't be + // a legacy bitfield and there is nothing left to do. + continue + } + + // We don't have fixup data, and the type we're pointing + // at hasn't been inflated yet. No choice but to defer + // the fixup.
+ bitfieldFixups = append(bitfieldFixups, bitfieldFixupDef{ + raw[i].Type, + m, + }) + } + return members, nil + } + + for i, raw := range rawTypes { + var ( + id = typeIDOffset + TypeID(i) + typ Type + ) + + name, err := rawStrings.Lookup(raw.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for type id %d: %w", id, err) + } + + switch raw.Kind() { + case kindInt: + size := raw.Size() + bi := raw.data.(*btfInt) + if bi.Offset() > 0 || bi.Bits().Bytes() != size { + legacyBitfields[id] = [2]Bits{bi.Offset(), bi.Bits()} + } + typ = &Int{name, raw.Size(), bi.Encoding()} + + case kindPointer: + ptr := &Pointer{nil} + fixup(raw.Type(), &ptr.Target) + typ = ptr + + case kindArray: + btfArr := raw.data.(*btfArray) + arr := &Array{nil, nil, btfArr.Nelems} + fixup(btfArr.IndexType, &arr.Index) + fixup(btfArr.Type, &arr.Type) + typ = arr + + case kindStruct: + members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag()) + if err != nil { + return nil, fmt.Errorf("struct %s (id %d): %w", name, id, err) + } + typ = &Struct{name, raw.Size(), members} + + case kindUnion: + members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag()) + if err != nil { + return nil, fmt.Errorf("union %s (id %d): %w", name, id, err) + } + typ = &Union{name, raw.Size(), members} + + case kindEnum: + rawvals := raw.data.([]btfEnum) + vals := make([]EnumValue, 0, len(rawvals)) + for i, btfVal := range rawvals { + name, err := rawStrings.Lookup(btfVal.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for enum value %d: %s", i, err) + } + vals = append(vals, EnumValue{ + Name: name, + Value: btfVal.Val, + }) + } + typ = &Enum{name, raw.Size(), vals} + + case kindForward: + if raw.KindFlag() { + typ = &Fwd{name, FwdUnion} + } else { + typ = &Fwd{name, FwdStruct} + } + + case kindTypedef: + typedef := &Typedef{name, nil} + fixup(raw.Type(), &typedef.Type) + typ = typedef + + case kindVolatile: + volatile := &Volatile{nil} + fixup(raw.Type(), &volatile.Type) + typ = volatile + + case kindConst: + cnst := &Const{nil} + fixup(raw.Type(), &cnst.Type) + typ = cnst + + case kindRestrict: + restrict := &Restrict{nil} + fixup(raw.Type(), &restrict.Type) + typ = restrict + + case kindFunc: + fn := &Func{name, nil, raw.Linkage()} + fixup(raw.Type(), &fn.Type) + if err := assert(&fn.Type, reflect.TypeOf((*FuncProto)(nil))); err != nil { + return nil, err + } + typ = fn + + case kindFuncProto: + rawparams := raw.data.([]btfParam) + params := make([]FuncParam, 0, len(rawparams)) + for i, param := range rawparams { + name, err := rawStrings.Lookup(param.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err) + } + params = append(params, FuncParam{ + Name: name, + }) + } + for i := range params { + fixup(rawparams[i].Type, ¶ms[i].Type) + } + + fp := &FuncProto{nil, params} + fixup(raw.Type(), &fp.Return) + typ = fp + + case kindVar: + variable := raw.data.(*btfVariable) + v := &Var{name, nil, VarLinkage(variable.Linkage)} + fixup(raw.Type(), &v.Type) + typ = v + + case kindDatasec: + btfVars := raw.data.([]btfVarSecinfo) + vars := make([]VarSecinfo, 0, len(btfVars)) + for _, btfVar := range btfVars { + vars = append(vars, VarSecinfo{ + Offset: btfVar.Offset, + Size: btfVar.Size, + }) + } + for i := range vars { + fixup(btfVars[i].Type, &vars[i].Type) + if err := assert(&vars[i].Type, reflect.TypeOf((*Var)(nil))); err != nil { + return nil, err + } + } + typ = &Datasec{name, raw.SizeType, vars} + + case kindFloat: + typ = &Float{name, raw.Size()} + + 
default: + return nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind()) + } + + types = append(types, typ) + } + + for _, fixup := range fixups { + i := int(fixup.id) + if i >= len(types)+len(baseTypes) { + return nil, fmt.Errorf("reference to invalid type id: %d", fixup.id) + } + if i < len(baseTypes) { + return nil, fmt.Errorf("fixup for base type id %d is not expected", i) + } + + *fixup.typ = types[i-len(baseTypes)] + } + + for _, bitfieldFixup := range bitfieldFixups { + if bitfieldFixup.id < TypeID(len(baseTypes)) { + return nil, fmt.Errorf("bitfield fixup from split to base types is not expected") + } + + data, ok := legacyBitfields[bitfieldFixup.id] + if ok { + // This is indeed a legacy bitfield, fix it up. + bitfieldFixup.m.Offset += data[0] + bitfieldFixup.m.BitfieldSize = data[1] + } + } + + for _, assertion := range assertions { + if reflect.TypeOf(*assertion.typ) != assertion.want { + return nil, fmt.Errorf("expected %s, got %T", assertion.want, *assertion.typ) + } + } + + return types, nil +} + +// essentialName represents the name of a BTF type stripped of any flavor +// suffixes after a ___ delimiter. +type essentialName string + +// newEssentialName returns name without a ___ suffix. +// +// CO-RE has the concept of 'struct flavors', which are used to deal with +// changes in kernel data structures. Anything after three underscores +// in a type name is ignored for the purpose of finding a candidate type +// in the kernel's BTF. +func newEssentialName(name string) essentialName { + if name == "" { + return "" + } + lastIdx := strings.LastIndex(name, "___") + if lastIdx > 0 { + return essentialName(name[:lastIdx]) + } + return essentialName(name) +} + +// UnderlyingType skips qualifiers and Typedefs. +func UnderlyingType(typ Type) Type { + result := typ + for depth := 0; depth <= maxTypeDepth; depth++ { + switch v := (result).(type) { + case qualifier: + result = v.qualify() + case *Typedef: + result = v.Type + default: + return result + } + } + return &cycle{typ} +} + +type formatState struct { + fmt.State + depth int +} + +// formattableType is a subset of Type, to ease unit testing of formatType. +type formattableType interface { + fmt.Formatter + TypeName() string +} + +// formatType formats a type in a canonical form. +// +// Handles cyclical types by only printing cycles up to a certain depth. Elements +// in extra are separated by spaces unless the preceding element is a string +// ending in '='. +func formatType(f fmt.State, verb rune, t formattableType, extra ...interface{}) { + if verb != 'v' && verb != 's' { + fmt.Fprintf(f, "{UNRECOGNIZED: %c}", verb) + return + } + + // This is the same as %T, but elides the package name. Assumes that + // formattableType is implemented by a pointer receiver. + goTypeName := reflect.TypeOf(t).Elem().Name() + _, _ = io.WriteString(f, goTypeName) + + if name := t.TypeName(); name != "" { + // Output BTF type name if present. + fmt.Fprintf(f, ":%q", name) + } + + if f.Flag('+') { + // Output address if requested. + fmt.Fprintf(f, ":%#p", t) + } + + if verb == 's' { + // %s omits details. + return + } + + var depth int + if ps, ok := f.(*formatState); ok { + depth = ps.depth + f = ps.State + } + + maxDepth, ok := f.Width() + if !ok { + maxDepth = 0 + } + + if depth > maxDepth { + // We've reached the maximum depth. This avoids infinite recursion even + // for cyclical types. 
+ return + } + + if len(extra) == 0 { + return + } + + wantSpace := false + _, _ = io.WriteString(f, "[") + for _, arg := range extra { + if wantSpace { + _, _ = io.WriteString(f, " ") + } + + switch v := arg.(type) { + case string: + _, _ = io.WriteString(f, v) + wantSpace = len(v) > 0 && v[len(v)-1] != '=' + continue + + case formattableType: + v.Format(&formatState{f, depth + 1}, verb) + + default: + fmt.Fprint(f, arg) + } + + wantSpace = true + } + _, _ = io.WriteString(f, "]") +} diff --git a/agent/vendor/github.com/cilium/ebpf/collection.go b/agent/vendor/github.com/cilium/ebpf/collection.go index 17cc69492ea..8c2ddc38021 100644 --- a/agent/vendor/github.com/cilium/ebpf/collection.go +++ b/agent/vendor/github.com/cilium/ebpf/collection.go @@ -1,16 +1,14 @@ package ebpf import ( + "encoding/binary" "errors" "fmt" - "io" - "math" "reflect" "strings" "github.com/cilium/ebpf/asm" - "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/btf" + "github.com/cilium/ebpf/btf" ) // CollectionOptions control loading a collection into the kernel. @@ -19,12 +17,31 @@ import ( type CollectionOptions struct { Maps MapOptions Programs ProgramOptions + + // MapReplacements takes a set of Maps that will be used instead of + // creating new ones when loading the CollectionSpec. + // + // For each given Map, there must be a corresponding MapSpec in + // CollectionSpec.Maps, and its type, key/value size, max entries and flags + // must match the values of the MapSpec. + // + // The given Maps are Clone()d before being used in the Collection, so the + // caller can Close() them freely when they are no longer needed. + MapReplacements map[string]*Map } // CollectionSpec describes a collection. type CollectionSpec struct { Maps map[string]*MapSpec Programs map[string]*ProgramSpec + + // Types holds type information about Maps and Programs. + // Modifications to Types are currently undefined behaviour. + Types *btf.Spec + + // ByteOrder specifies whether the ELF was compiled for + // big-endian or little-endian architectures. + ByteOrder binary.ByteOrder } // Copy returns a recursive copy of the spec. @@ -34,8 +51,10 @@ func (cs *CollectionSpec) Copy() *CollectionSpec { } cpy := CollectionSpec{ - Maps: make(map[string]*MapSpec, len(cs.Maps)), - Programs: make(map[string]*ProgramSpec, len(cs.Programs)), + Maps: make(map[string]*MapSpec, len(cs.Maps)), + Programs: make(map[string]*ProgramSpec, len(cs.Programs)), + ByteOrder: cs.ByteOrder, + Types: cs.Types, } for name, spec := range cs.Maps { @@ -55,19 +74,21 @@ func (cs *CollectionSpec) Copy() *CollectionSpec { // when calling NewCollection. Any named maps are removed from CollectionSpec.Maps. // // Returns an error if a named map isn't used in at least one program. +// +// Deprecated: Pass CollectionOptions.MapReplacements when loading the Collection +// instead. func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error { for symbol, m := range maps { // have we seen a program that uses this symbol / map seen := false - fd := m.FD() for progName, progSpec := range cs.Programs { - err := progSpec.Instructions.RewriteMapPtr(symbol, fd) + err := progSpec.Instructions.AssociateMap(symbol, m) switch { case err == nil: seen = true - case asm.IsUnreferencedSymbol(err): + case errors.Is(err, asm.ErrUnreferencedSymbol): // Not all programs need to use the map default: @@ -101,48 +122,81 @@ func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error { // // Returns an error if a constant doesn't exist. 
func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error { - rodata := cs.Maps[".rodata"] - if rodata == nil { - return errors.New("missing .rodata section") - } + replaced := make(map[string]bool) - if rodata.BTF == nil { - return errors.New(".rodata section has no BTF") - } + for name, spec := range cs.Maps { + if !strings.HasPrefix(name, ".rodata") { + continue + } - if n := len(rodata.Contents); n != 1 { - return fmt.Errorf("expected one key in .rodata, found %d", n) - } + b, ds, err := spec.dataSection() + if errors.Is(err, errMapNoBTFValue) { + // Data sections without a BTF Datasec are valid, but don't support + // constant replacements. + continue + } + if err != nil { + return fmt.Errorf("map %s: %w", name, err) + } + + // MapSpec.Copy() performs a shallow copy. Fully copy the byte slice + // to avoid any changes affecting other copies of the MapSpec. + cpy := make([]byte, len(b)) + copy(cpy, b) + + for _, v := range ds.Vars { + vname := v.Type.TypeName() + replacement, ok := consts[vname] + if !ok { + continue + } + + if replaced[vname] { + return fmt.Errorf("section %s: duplicate variable %s", name, vname) + } + + if int(v.Offset+v.Size) > len(cpy) { + return fmt.Errorf("section %s: offset %d(+%d) for variable %s is out of bounds", name, v.Offset, v.Size, vname) + } + + b, err := marshalBytes(replacement, int(v.Size)) + if err != nil { + return fmt.Errorf("marshaling constant replacement %s: %w", vname, err) + } + + copy(cpy[v.Offset:v.Offset+v.Size], b) + + replaced[vname] = true + } - kv := rodata.Contents[0] - value, ok := kv.Value.([]byte) - if !ok { - return fmt.Errorf("first value in .rodata is %T not []byte", kv.Value) + spec.Contents[0] = MapKV{Key: uint32(0), Value: cpy} } - buf := make([]byte, len(value)) - copy(buf, value) + var missing []string + for c := range consts { + if !replaced[c] { + missing = append(missing, c) + } + } - err := patchValue(buf, btf.MapValue(rodata.BTF), consts) - if err != nil { - return err + if len(missing) != 0 { + return fmt.Errorf("spec is missing one or more constants: %s", strings.Join(missing, ",")) } - rodata.Contents[0] = MapKV{kv.Key, buf} return nil } // Assign the contents of a CollectionSpec to a struct. // -// This function is a short-cut to manually checking the presence -// of maps and programs in a collection spec. Consider using bpf2go if this -// sounds useful. +// This function is a shortcut to manually checking the presence +// of maps and programs in a CollectionSpec. Consider using bpf2go +// if this sounds useful. // -// The argument to must be a pointer to a struct. A field of the +// 'to' must be a pointer to a struct. A field of the // struct is updated with values from Programs or Maps if it // has an `ebpf` tag and its type is *ProgramSpec or *MapSpec. -// The tag gives the name of the program or map as found in -// the CollectionSpec. +// The tag's value specifies the name of the program or map as +// found in the CollectionSpec. // // struct { // Foo *ebpf.ProgramSpec `ebpf:"xdp_foo"` @@ -150,42 +204,50 @@ func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error // Ignored int // } // -// Returns an error if any of the fields can't be found, or -// if the same map or program is assigned multiple times. +// Returns an error if any of the eBPF objects can't be found, or +// if the same MapSpec or ProgramSpec is assigned multiple times. 
func (cs *CollectionSpec) Assign(to interface{}) error { - valueOf := func(typ reflect.Type, name string) (reflect.Value, error) { + // Assign() only supports assigning ProgramSpecs and MapSpecs, + // so doesn't load any resources into the kernel. + getValue := func(typ reflect.Type, name string) (interface{}, error) { switch typ { + case reflect.TypeOf((*ProgramSpec)(nil)): - p := cs.Programs[name] - if p == nil { - return reflect.Value{}, fmt.Errorf("missing program %q", name) + if p := cs.Programs[name]; p != nil { + return p, nil } - return reflect.ValueOf(p), nil + return nil, fmt.Errorf("missing program %q", name) + case reflect.TypeOf((*MapSpec)(nil)): - m := cs.Maps[name] - if m == nil { - return reflect.Value{}, fmt.Errorf("missing map %q", name) + if m := cs.Maps[name]; m != nil { + return m, nil } - return reflect.ValueOf(m), nil + return nil, fmt.Errorf("missing map %q", name) + default: - return reflect.Value{}, fmt.Errorf("unsupported type %s", typ) + return nil, fmt.Errorf("unsupported type %s", typ) } } - return assignValues(to, valueOf) + return assignValues(to, getValue) } -// LoadAndAssign maps and programs into the kernel and assign them to a struct. +// LoadAndAssign loads Maps and Programs into the kernel and assigns them +// to a struct. // -// This function is a short-cut to manually checking the presence -// of maps and programs in a collection spec. Consider using bpf2go if this -// sounds useful. +// Omitting Map/Program.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. // -// The argument to must be a pointer to a struct. A field of the -// struct is updated with values from Programs or Maps if it -// has an `ebpf` tag and its type is *Program or *Map. -// The tag gives the name of the program or map as found in -// the CollectionSpec. +// This function is a shortcut to manually checking the presence +// of maps and programs in a CollectionSpec. Consider using bpf2go +// if this sounds useful. +// +// 'to' must be a pointer to a struct. A field of the struct is updated with +// a Program or Map if it has an `ebpf` tag and its type is *Program or *Map. +// The tag's value specifies the name of the program or map as found in the +// CollectionSpec. Before updating the struct, the requested objects and their +// dependent resources are loaded into the kernel and populated with values if +// specified. // // struct { // Foo *ebpf.Program `ebpf:"xdp_foo"` @@ -196,39 +258,70 @@ func (cs *CollectionSpec) Assign(to interface{}) error { // opts may be nil. // // Returns an error if any of the fields can't be found, or -// if the same map or program is assigned multiple times. +// if the same Map or Program is assigned multiple times. func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) error { - if opts == nil { - opts = &CollectionOptions{} + loader, err := newCollectionLoader(cs, opts) + if err != nil { + return err } + defer loader.close() - loadMap, loadProgram, done, cleanup := lazyLoadCollection(cs, opts) - defer cleanup() + // Support assigning Programs and Maps, lazy-loading the required objects. 
+ assignedMaps := make(map[string]bool) + assignedProgs := make(map[string]bool) - valueOf := func(typ reflect.Type, name string) (reflect.Value, error) { + getValue := func(typ reflect.Type, name string) (interface{}, error) { switch typ { + case reflect.TypeOf((*Program)(nil)): - p, err := loadProgram(name) - if err != nil { - return reflect.Value{}, err - } - return reflect.ValueOf(p), nil + assignedProgs[name] = true + return loader.loadProgram(name) + case reflect.TypeOf((*Map)(nil)): - m, err := loadMap(name) - if err != nil { - return reflect.Value{}, err - } - return reflect.ValueOf(m), nil + assignedMaps[name] = true + return loader.loadMap(name) + default: - return reflect.Value{}, fmt.Errorf("unsupported type %s", typ) + return nil, fmt.Errorf("unsupported type %s", typ) } } - if err := assignValues(to, valueOf); err != nil { + // Load the Maps and Programs requested by the annotated struct. + if err := assignValues(to, getValue); err != nil { + return err + } + + // Populate the requested maps. Has a chance of lazy-loading other dependent maps. + if err := loader.populateMaps(); err != nil { return err } - done() + // Evaluate the loader's objects after all (lazy)loading has taken place. + for n, m := range loader.maps { + switch m.typ { + case ProgramArray: + // Require all lazy-loaded ProgramArrays to be assigned to the given object. + // The kernel empties a ProgramArray once the last user space reference + // to it closes, which leads to failed tail calls. Combined with the library + // closing map fds via GC finalizers this can lead to surprising behaviour. + // Only allow unassigned ProgramArrays when the library hasn't pre-populated + // any entries from static value declarations. At this point, we know the map + // is empty and there's no way for the caller to interact with the map going + // forward. + if !assignedMaps[n] && len(cs.Maps[n].Contents) > 0 { + return fmt.Errorf("ProgramArray %s must be assigned to prevent missed tail calls", n) + } + } + } + + // Prevent loader.cleanup() from closing assigned Maps and Programs. + for m := range assignedMaps { + delete(loader.maps, m) + } + for p := range assignedProgs { + delete(loader.programs, p) + } + return nil } @@ -239,31 +332,54 @@ type Collection struct { Maps map[string]*Map } -// NewCollection creates a Collection from a specification. +// NewCollection creates a Collection from the given spec, creating and +// loading its declared resources into the kernel. +// +// Omitting Collection.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. func NewCollection(spec *CollectionSpec) (*Collection, error) { return NewCollectionWithOptions(spec, CollectionOptions{}) } -// NewCollectionWithOptions creates a Collection from a specification. +// NewCollectionWithOptions creates a Collection from the given spec using +// options, creating and loading its declared resources into the kernel. +// +// Omitting Collection.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Collection, error) { - loadMap, loadProgram, done, cleanup := lazyLoadCollection(spec, &opts) - defer cleanup() + loader, err := newCollectionLoader(spec, &opts) + if err != nil { + return nil, err + } + defer loader.close() + // Create maps first, as their fds need to be linked into programs. 
for mapName := range spec.Maps { - _, err := loadMap(mapName) - if err != nil { + if _, err := loader.loadMap(mapName); err != nil { return nil, err } } - for progName := range spec.Programs { - _, err := loadProgram(progName) - if err != nil { + for progName, prog := range spec.Programs { + if prog.Type == UnspecifiedProgram { + continue + } + + if _, err := loader.loadProgram(progName); err != nil { return nil, err } } - maps, progs := done() + // Maps can contain Program and Map stubs, so populate them after + // all Maps and Programs have been successfully loaded. + if err := loader.populateMaps(); err != nil { + return nil, err + } + + // Prevent loader.cleanup from closing maps and programs. + maps, progs := loader.maps, loader.programs + loader.maps, loader.programs = nil, nil + return &Collection{ progs, maps, @@ -272,13 +388,11 @@ func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Co type handleCache struct { btfHandles map[*btf.Spec]*btf.Handle - btfSpecs map[io.ReaderAt]*btf.Spec } func newHandleCache() *handleCache { return &handleCache{ btfHandles: make(map[*btf.Spec]*btf.Handle), - btfSpecs: make(map[io.ReaderAt]*btf.Spec), } } @@ -296,134 +410,198 @@ func (hc handleCache) btfHandle(spec *btf.Spec) (*btf.Handle, error) { return handle, nil } -func (hc handleCache) btfSpec(rd io.ReaderAt) (*btf.Spec, error) { - if hc.btfSpecs[rd] != nil { - return hc.btfSpecs[rd], nil - } - - spec, err := btf.LoadSpecFromReader(rd) - if err != nil { - return nil, err - } - - hc.btfSpecs[rd] = spec - return spec, nil -} - func (hc handleCache) close() { for _, handle := range hc.btfHandles { handle.Close() } - hc.btfHandles = nil - hc.btfSpecs = nil } -func lazyLoadCollection(coll *CollectionSpec, opts *CollectionOptions) ( - loadMap func(string) (*Map, error), - loadProgram func(string) (*Program, error), - done func() (map[string]*Map, map[string]*Program), - cleanup func(), -) { - var ( - maps = make(map[string]*Map) - progs = make(map[string]*Program) - handles = newHandleCache() - skipMapsAndProgs = false - ) - - cleanup = func() { - handles.close() - - if skipMapsAndProgs { - return - } +type collectionLoader struct { + coll *CollectionSpec + opts *CollectionOptions + maps map[string]*Map + programs map[string]*Program + handles *handleCache +} - for _, m := range maps { - m.Close() +func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) (*collectionLoader, error) { + if opts == nil { + opts = &CollectionOptions{} + } + + // Check for existing MapSpecs in the CollectionSpec for all provided replacement maps. + for name, m := range opts.MapReplacements { + spec, ok := coll.Maps[name] + if !ok { + return nil, fmt.Errorf("replacement map %s not found in CollectionSpec", name) } - for _, p := range progs { - p.Close() + if err := spec.checkCompatibility(m); err != nil { + return nil, fmt.Errorf("using replacement map %s: %w", spec.Name, err) } } - done = func() (map[string]*Map, map[string]*Program) { - skipMapsAndProgs = true - return maps, progs + return &collectionLoader{ + coll, + opts, + make(map[string]*Map), + make(map[string]*Program), + newHandleCache(), + }, nil +} + +// close all resources left over in the collectionLoader. 
+func (cl *collectionLoader) close() { + cl.handles.close() + for _, m := range cl.maps { + m.Close() + } + for _, p := range cl.programs { + p.Close() + } +} + +func (cl *collectionLoader) loadMap(mapName string) (*Map, error) { + if m := cl.maps[mapName]; m != nil { + return m, nil } - loadMap = func(mapName string) (*Map, error) { - if m := maps[mapName]; m != nil { - return m, nil - } + mapSpec := cl.coll.Maps[mapName] + if mapSpec == nil { + return nil, fmt.Errorf("missing map %s", mapName) + } - mapSpec := coll.Maps[mapName] - if mapSpec == nil { - return nil, fmt.Errorf("missing map %s", mapName) - } + if mapSpec.BTF != nil && cl.coll.Types != mapSpec.BTF { + return nil, fmt.Errorf("map %s: BTF doesn't match collection", mapName) + } - m, err := newMapWithOptions(mapSpec, opts.Maps, handles) + if replaceMap, ok := cl.opts.MapReplacements[mapName]; ok { + // Clone the map to avoid closing user's map later on. + m, err := replaceMap.Clone() if err != nil { - return nil, fmt.Errorf("map %s: %w", mapName, err) + return nil, err } - maps[mapName] = m + cl.maps[mapName] = m return m, nil } - loadProgram = func(progName string) (*Program, error) { - if prog := progs[progName]; prog != nil { - return prog, nil - } + m, err := newMapWithOptions(mapSpec, cl.opts.Maps, cl.handles) + if err != nil { + return nil, fmt.Errorf("map %s: %w", mapName, err) + } - progSpec := coll.Programs[progName] - if progSpec == nil { - return nil, fmt.Errorf("unknown program %s", progName) - } + cl.maps[mapName] = m + return m, nil +} - progSpec = progSpec.Copy() +func (cl *collectionLoader) loadProgram(progName string) (*Program, error) { + if prog := cl.programs[progName]; prog != nil { + return prog, nil + } - // Rewrite any reference to a valid map. - for i := range progSpec.Instructions { - ins := &progSpec.Instructions[i] + progSpec := cl.coll.Programs[progName] + if progSpec == nil { + return nil, fmt.Errorf("unknown program %s", progName) + } - if !ins.IsLoadFromMap() || ins.Reference == "" { - continue - } + // Bail out early if we know the kernel is going to reject the program. + // This skips loading map dependencies, saving some cleanup work later. + if progSpec.Type == UnspecifiedProgram { + return nil, fmt.Errorf("cannot load program %s: program type is unspecified", progName) + } - if uint32(ins.Constant) != math.MaxUint32 { - // Don't overwrite maps already rewritten, users can - // rewrite programs in the spec themselves - continue - } + if progSpec.BTF != nil && cl.coll.Types != progSpec.BTF { + return nil, fmt.Errorf("program %s: BTF doesn't match collection", progName) + } - m, err := loadMap(ins.Reference) - if err != nil { - return nil, fmt.Errorf("program %s: %w", progName, err) - } + progSpec = progSpec.Copy() - fd := m.FD() - if fd < 0 { - return nil, fmt.Errorf("map %s: %w", ins.Reference, internal.ErrClosedFd) - } - if err := ins.RewriteMapPtr(m.FD()); err != nil { - return nil, fmt.Errorf("progam %s: map %s: %w", progName, ins.Reference, err) - } + // Rewrite any reference to a valid map in the program's instructions, + // which includes all of its dependencies. + for i := range progSpec.Instructions { + ins := &progSpec.Instructions[i] + + if !ins.IsLoadFromMap() || ins.Reference() == "" { + continue + } + + // Don't overwrite map loads containing non-zero map fd's, + // they can be manually included by the caller. + // Map FDs/IDs are placed in the lower 32 bits of Constant. 
+ if int32(ins.Constant) > 0 { + continue } - prog, err := newProgramWithOptions(progSpec, opts.Programs, handles) + m, err := cl.loadMap(ins.Reference()) if err != nil { return nil, fmt.Errorf("program %s: %w", progName, err) } - progs[progName] = prog - return prog, nil + if err := ins.AssociateMap(m); err != nil { + return nil, fmt.Errorf("program %s: map %s: %w", progName, ins.Reference(), err) + } + } + + prog, err := newProgramWithOptions(progSpec, cl.opts.Programs, cl.handles) + if err != nil { + return nil, fmt.Errorf("program %s: %w", progName, err) + } + + cl.programs[progName] = prog + return prog, nil +} + +func (cl *collectionLoader) populateMaps() error { + for mapName, m := range cl.maps { + mapSpec, ok := cl.coll.Maps[mapName] + if !ok { + return fmt.Errorf("missing map spec %s", mapName) + } + + mapSpec = mapSpec.Copy() + + // MapSpecs that refer to inner maps or programs within the same + // CollectionSpec do so using strings. These strings are used as the key + // to look up the respective object in the Maps or Programs fields. + // Resolve those references to actual Map or Program resources that + // have been loaded into the kernel. + for i, kv := range mapSpec.Contents { + if objName, ok := kv.Value.(string); ok { + switch mapSpec.Type { + case ProgramArray: + // loadProgram is idempotent and could return an existing Program. + prog, err := cl.loadProgram(objName) + if err != nil { + return fmt.Errorf("loading program %s, for map %s: %w", objName, mapName, err) + } + mapSpec.Contents[i] = MapKV{kv.Key, prog} + + case ArrayOfMaps, HashOfMaps: + // loadMap is idempotent and could return an existing Map. + innerMap, err := cl.loadMap(objName) + if err != nil { + return fmt.Errorf("loading inner map %s, for map %s: %w", objName, mapName, err) + } + mapSpec.Contents[i] = MapKV{kv.Key, innerMap} + } + } + } + + // Populate and freeze the map if specified. + if err := m.finalize(mapSpec); err != nil { + return fmt.Errorf("populating map %s: %w", mapName, err) + } } - return + return nil } -// LoadCollection parses an object file and converts it to a collection. +// LoadCollection reads an object file and creates and loads its declared +// resources into the kernel. +// +// Omitting Collection.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. func LoadCollection(file string) (*Collection, error) { spec, err := LoadCollectionSpec(file) if err != nil { @@ -466,108 +644,81 @@ func (coll *Collection) DetachProgram(name string) *Program { return p } -// Assign the contents of a collection to a struct. -// -// Deprecated: use CollectionSpec.Assign instead. It provides the same -// functionality but creates only the maps and programs requested. 
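populateMaps above resolves ProgramArray entries that were declared statically; entries can also be installed at runtime via Map.Put, roughly as in this sketch (the map name "prog_array" and the slot index are placeholders):

package main

import (
	"fmt"

	"github.com/cilium/ebpf"
)

// installTailCall puts prog into slot 0 of a ProgramArray so it can be
// reached via bpf_tail_call from another program.
func installTailCall(coll *ebpf.Collection, prog *ebpf.Program) error {
	arr := coll.Maps["prog_array"]
	if arr == nil {
		return fmt.Errorf("missing map prog_array")
	}
	// Keep a reference to arr for as long as the tail call must keep working;
	// the kernel clears a ProgramArray once its last user space reference closes.
	return arr.Put(uint32(0), prog)
}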
-func (coll *Collection) Assign(to interface{}) error { - assignedMaps := make(map[string]struct{}) - assignedPrograms := make(map[string]struct{}) - valueOf := func(typ reflect.Type, name string) (reflect.Value, error) { - switch typ { - case reflect.TypeOf((*Program)(nil)): - p := coll.Programs[name] - if p == nil { - return reflect.Value{}, fmt.Errorf("missing program %q", name) - } - assignedPrograms[name] = struct{}{} - return reflect.ValueOf(p), nil - case reflect.TypeOf((*Map)(nil)): - m := coll.Maps[name] - if m == nil { - return reflect.Value{}, fmt.Errorf("missing map %q", name) - } - assignedMaps[name] = struct{}{} - return reflect.ValueOf(m), nil - default: - return reflect.Value{}, fmt.Errorf("unsupported type %s", typ) - } - } - - if err := assignValues(to, valueOf); err != nil { - return err - } +// structField represents a struct field containing the ebpf struct tag. +type structField struct { + reflect.StructField + value reflect.Value +} - for name := range assignedPrograms { - coll.DetachProgram(name) +// ebpfFields extracts field names tagged with 'ebpf' from a struct type. +// Keep track of visited types to avoid infinite recursion. +func ebpfFields(structVal reflect.Value, visited map[reflect.Type]bool) ([]structField, error) { + if visited == nil { + visited = make(map[reflect.Type]bool) } - for name := range assignedMaps { - coll.DetachMap(name) + structType := structVal.Type() + if structType.Kind() != reflect.Struct { + return nil, fmt.Errorf("%s is not a struct", structType) } - return nil -} - -func assignValues(to interface{}, valueOf func(reflect.Type, string) (reflect.Value, error)) error { - type structField struct { - reflect.StructField - value reflect.Value + if visited[structType] { + return nil, fmt.Errorf("recursion on type %s", structType) } - var ( - fields []structField - visitedTypes = make(map[reflect.Type]bool) - flattenStruct func(reflect.Value) error - ) - - flattenStruct = func(structVal reflect.Value) error { - structType := structVal.Type() - if structType.Kind() != reflect.Struct { - return fmt.Errorf("%s is not a struct", structType) - } + fields := make([]structField, 0, structType.NumField()) + for i := 0; i < structType.NumField(); i++ { + field := structField{structType.Field(i), structVal.Field(i)} - if visitedTypes[structType] { - return fmt.Errorf("recursion on type %s", structType) + // If the field is tagged, gather it and move on. + name := field.Tag.Get("ebpf") + if name != "" { + fields = append(fields, field) + continue } - for i := 0; i < structType.NumField(); i++ { - field := structField{structType.Field(i), structVal.Field(i)} - - name := field.Tag.Get("ebpf") - if name != "" { - fields = append(fields, field) + // If the field does not have an ebpf tag, but is a struct or a pointer + // to a struct, attempt to gather its fields as well. + var v reflect.Value + switch field.Type.Kind() { + case reflect.Ptr: + if field.Type.Elem().Kind() != reflect.Struct { continue } - var err error - switch field.Type.Kind() { - case reflect.Ptr: - if field.Type.Elem().Kind() != reflect.Struct { - continue - } - - if field.value.IsNil() { - return fmt.Errorf("nil pointer to %s", structType) - } + if field.value.IsNil() { + return nil, fmt.Errorf("nil pointer to %s", structType) + } - err = flattenStruct(field.value.Elem()) + // Obtain the destination type of the pointer. + v = field.value.Elem() - case reflect.Struct: - err = flattenStruct(field.value) + case reflect.Struct: + // Reference the value's type directly. 
+ v = field.value - default: - continue - } + default: + continue + } - if err != nil { - return fmt.Errorf("field %s: %w", field.Name, err) - } + inner, err := ebpfFields(v, visited) + if err != nil { + return nil, fmt.Errorf("field %s: %w", field.Name, err) } - return nil + fields = append(fields, inner...) } + return fields, nil +} + +// assignValues attempts to populate all fields of 'to' tagged with 'ebpf'. +// +// getValue is called for every tagged field of 'to' and must return the value +// to be assigned to the field with the given typ and name. +func assignValues(to interface{}, + getValue func(typ reflect.Type, name string) (interface{}, error)) error { + toValue := reflect.ValueOf(to) if toValue.Type().Kind() != reflect.Ptr { return fmt.Errorf("%T is not a pointer to struct", to) @@ -577,7 +728,8 @@ func assignValues(to interface{}, valueOf func(reflect.Type, string) (reflect.Va return fmt.Errorf("nil pointer to %T", to) } - if err := flattenStruct(toValue.Elem()); err != nil { + fields, err := ebpfFields(toValue.Elem(), nil) + if err != nil { return err } @@ -587,19 +739,23 @@ func assignValues(to interface{}, valueOf func(reflect.Type, string) (reflect.Va name string } - assignedTo := make(map[elem]string) + assigned := make(map[elem]string) for _, field := range fields { - name := field.Tag.Get("ebpf") - if strings.Contains(name, ",") { + // Get string value the field is tagged with. + tag := field.Tag.Get("ebpf") + if strings.Contains(tag, ",") { return fmt.Errorf("field %s: ebpf tag contains a comma", field.Name) } - e := elem{field.Type, name} - if assignedField := assignedTo[e]; assignedField != "" { - return fmt.Errorf("field %s: %q was already assigned to %s", field.Name, name, assignedField) + // Check if the eBPF object with the requested + // type and tag was already assigned elsewhere. + e := elem{field.Type, tag} + if af := assigned[e]; af != "" { + return fmt.Errorf("field %s: object %q was already assigned to %s", field.Name, tag, af) } - value, err := valueOf(field.Type, name) + // Get the eBPF object referred to by the tag. + value, err := getValue(field.Type, tag) if err != nil { return fmt.Errorf("field %s: %w", field.Name, err) } @@ -607,9 +763,9 @@ func assignValues(to interface{}, valueOf func(reflect.Type, string) (reflect.Va if !field.value.CanSet() { return fmt.Errorf("field %s: can't set value", field.Name) } + field.value.Set(reflect.ValueOf(value)) - field.value.Set(value) - assignedTo[e] = field.Name + assigned[e] = field.Name } return nil diff --git a/agent/vendor/github.com/cilium/ebpf/doc.go b/agent/vendor/github.com/cilium/ebpf/doc.go index f7f34da8f44..396b3394d33 100644 --- a/agent/vendor/github.com/cilium/ebpf/doc.go +++ b/agent/vendor/github.com/cilium/ebpf/doc.go @@ -13,4 +13,13 @@ // your application as any other resource. // // Use the link subpackage to attach a loaded program to a hook in the kernel. +// +// Note that losing all references to Map and Program resources will cause +// their underlying file descriptors to be closed, potentially removing those +// objects from the kernel. Always retain a reference by e.g. deferring a +// Close() of a Collection or LoadAndAssign object until application exit. +// +// Special care needs to be taken when handling maps of type ProgramArray, +// as the kernel erases its contents when the last userspace or bpffs +// reference disappears, regardless of the map being in active use. 
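The tag handling above is what CollectionSpec.LoadAndAssign builds on; a sketch with hypothetical object names "filter" and "events", showing that nested structs are flattened:

package main

import "github.com/cilium/ebpf"

// bpfObjects mirrors names declared in the ELF; ebpfFields flattens the
// nested struct so both tagged fields are assigned.
type bpfObjects struct {
	Filter *ebpf.Program `ebpf:"filter"`
	Inner  struct {
		Events *ebpf.Map `ebpf:"events"`
	}
}

func loadObjects(spec *ebpf.CollectionSpec) (*bpfObjects, error) {
	var objs bpfObjects
	if err := spec.LoadAndAssign(&objs, nil); err != nil {
		return nil, err
	}
	// The caller owns objs.Filter and objs.Inner.Events and must Close them.
	return &objs, nil
}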
package ebpf diff --git a/agent/vendor/github.com/cilium/ebpf/elf_reader.go b/agent/vendor/github.com/cilium/ebpf/elf_reader.go index c2afbc36a5a..df278895c63 100644 --- a/agent/vendor/github.com/cilium/ebpf/elf_reader.go +++ b/agent/vendor/github.com/cilium/ebpf/elf_reader.go @@ -13,19 +13,20 @@ import ( "strings" "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/btf" "github.com/cilium/ebpf/internal/unix" ) // elfCode is a convenience to reduce the amount of arguments that have to -// be passed around explicitly. You should treat it's contents as immutable. +// be passed around explicitly. You should treat its contents as immutable. type elfCode struct { *internal.SafeELFFile sections map[elf.SectionIndex]*elfSection license string version uint32 btf *btf.Spec + extInfo *btf.ExtInfos } // LoadCollectionSpec parses an ELF file into a CollectionSpec. @@ -49,7 +50,6 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { if err != nil { return nil, err } - defer f.Close() var ( licenseSection *elf.Section @@ -95,77 +95,29 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { return nil, fmt.Errorf("load version: %w", err) } - btfSpec, err := btf.LoadSpecFromReader(rd) + btfSpec, btfExtInfo, err := btf.LoadSpecAndExtInfosFromReader(rd) if err != nil && !errors.Is(err, btf.ErrNotFound) { return nil, fmt.Errorf("load BTF: %w", err) } - // Assign symbols to all the sections we're interested in. - symbols, err := f.Symbols() - if err != nil { - return nil, fmt.Errorf("load symbols: %v", err) - } - - for _, symbol := range symbols { - idx := symbol.Section - symType := elf.ST_TYPE(symbol.Info) - - section := sections[idx] - if section == nil { - continue - } - - // Older versions of LLVM don't tag symbols correctly, so keep - // all NOTYPE ones. - keep := symType == elf.STT_NOTYPE - switch section.kind { - case mapSection, btfMapSection, dataSection: - keep = keep || symType == elf.STT_OBJECT - case programSection: - keep = keep || symType == elf.STT_FUNC - } - if !keep || symbol.Name == "" { - continue - } - - section.symbols[symbol.Value] = symbol - } - ec := &elfCode{ SafeELFFile: f, sections: sections, license: license, version: version, btf: btfSpec, + extInfo: btfExtInfo, } - // Go through relocation sections, and parse the ones for sections we're - // interested in. Make sure that relocations point at valid sections. - for idx, relSection := range relSections { - section := sections[idx] - if section == nil { - continue - } - - rels, err := ec.loadRelocations(relSection, symbols) - if err != nil { - return nil, fmt.Errorf("relocation for section %q: %w", section.Name, err) - } - - for _, rel := range rels { - target := sections[rel.Section] - if target == nil { - return nil, fmt.Errorf("section %q: reference to %q in section %s: %w", section.Name, rel.Name, rel.Section, ErrNotSupported) - } - - if target.Flags&elf.SHF_STRINGS > 0 { - return nil, fmt.Errorf("section %q: string is not stack allocated: %w", section.Name, ErrNotSupported) - } + symbols, err := f.Symbols() + if err != nil { + return nil, fmt.Errorf("load symbols: %v", err) + } - target.references++ - } + ec.assignSymbols(symbols) - section.relocations = rels + if err := ec.loadRelocations(relSections, symbols); err != nil { + return nil, fmt.Errorf("load relocations: %w", err) } // Collect all the various ways to define maps. 
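LoadCollectionSpecFromReader is commonly fed an embedded object file; a small sketch under the assumption that a bpf_prog.o sits next to the Go source:

package main

import (
	"bytes"
	_ "embed"
	"log"

	"github.com/cilium/ebpf"
)

//go:embed bpf_prog.o
var bpfELF []byte

func main() {
	spec, err := ebpf.LoadCollectionSpecFromReader(bytes.NewReader(bpfELF))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("parsed %d maps and %d programs", len(spec.Maps), len(spec.Programs))
}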
@@ -183,12 +135,12 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { } // Finally, collect programs and link them. - progs, err := ec.loadPrograms() + progs, err := ec.loadProgramSections() if err != nil { return nil, fmt.Errorf("load programs: %w", err) } - return &CollectionSpec{maps, progs}, nil + return &CollectionSpec{maps, progs, btfSpec, ec.ByteOrder}, nil } func loadLicense(sec *elf.Section) (string, error) { @@ -247,12 +199,91 @@ func newElfSection(section *elf.Section, kind elfSectionKind) *elfSection { } } -func (ec *elfCode) loadPrograms() (map[string]*ProgramSpec, error) { - var ( - progs []*ProgramSpec - libs []*ProgramSpec - ) +// assignSymbols takes a list of symbols and assigns them to their +// respective sections, indexed by name. +func (ec *elfCode) assignSymbols(symbols []elf.Symbol) { + for _, symbol := range symbols { + symType := elf.ST_TYPE(symbol.Info) + symSection := ec.sections[symbol.Section] + if symSection == nil { + continue + } + + // Anonymous symbols only occur in debug sections which we don't process + // relocations for. Anonymous symbols are not referenced from other sections. + if symbol.Name == "" { + continue + } + // Older versions of LLVM don't tag symbols correctly, so keep + // all NOTYPE ones. + switch symSection.kind { + case mapSection, btfMapSection, dataSection: + if symType != elf.STT_NOTYPE && symType != elf.STT_OBJECT { + continue + } + case programSection: + if symType != elf.STT_NOTYPE && symType != elf.STT_FUNC { + continue + } + // LLVM emits LBB_ (Local Basic Block) symbols that seem to be jump + // targets within sections, but BPF has no use for them. + if symType == elf.STT_NOTYPE && elf.ST_BIND(symbol.Info) == elf.STB_LOCAL && + strings.HasPrefix(symbol.Name, "LBB") { + continue + } + // Only collect symbols that occur in program/maps/data sections. + default: + continue + } + + symSection.symbols[symbol.Value] = symbol + } +} + +// loadRelocations iterates .rel* sections and extracts relocation entries for +// sections of interest. Makes sure relocations point at valid sections. +func (ec *elfCode) loadRelocations(relSections map[elf.SectionIndex]*elf.Section, symbols []elf.Symbol) error { + for idx, relSection := range relSections { + section := ec.sections[idx] + if section == nil { + continue + } + + rels, err := ec.loadSectionRelocations(relSection, symbols) + if err != nil { + return fmt.Errorf("relocation for section %q: %w", section.Name, err) + } + + for _, rel := range rels { + target := ec.sections[rel.Section] + if target == nil { + return fmt.Errorf("section %q: reference to %q in section %s: %w", section.Name, rel.Name, rel.Section, ErrNotSupported) + } + + if target.Flags&elf.SHF_STRINGS > 0 { + return fmt.Errorf("section %q: string is not stack allocated: %w", section.Name, ErrNotSupported) + } + + target.references++ + } + + section.relocations = rels + } + + return nil +} + +// loadProgramSections iterates ec's sections and emits a ProgramSpec +// for each function it finds. +// +// The resulting map is indexed by function name. +func (ec *elfCode) loadProgramSections() (map[string]*ProgramSpec, error) { + + progs := make(map[string]*ProgramSpec) + + // Generate a ProgramSpec for each function found in each program section. 
+ var export []string for _, sec := range ec.sections { if sec.kind != programSection { continue @@ -262,86 +293,144 @@ func (ec *elfCode) loadPrograms() (map[string]*ProgramSpec, error) { return nil, fmt.Errorf("section %v: missing symbols", sec.Name) } - funcSym, ok := sec.symbols[0] - if !ok { - return nil, fmt.Errorf("section %v: no label at start", sec.Name) - } - - insns, length, err := ec.loadInstructions(sec) + funcs, err := ec.loadFunctions(sec) if err != nil { - return nil, fmt.Errorf("program %s: %w", funcSym.Name, err) + return nil, fmt.Errorf("section %v: %w", sec.Name, err) } progType, attachType, progFlags, attachTo := getProgType(sec.Name) - spec := &ProgramSpec{ - Name: funcSym.Name, - Type: progType, - Flags: progFlags, - AttachType: attachType, - AttachTo: attachTo, - License: ec.license, - KernelVersion: ec.version, - Instructions: insns, - ByteOrder: ec.ByteOrder, - } + for name, insns := range funcs { + spec := &ProgramSpec{ + Name: name, + Type: progType, + Flags: progFlags, + AttachType: attachType, + AttachTo: attachTo, + SectionName: sec.Name, + License: ec.license, + KernelVersion: ec.version, + Instructions: insns, + ByteOrder: ec.ByteOrder, + BTF: ec.btf, + } - if ec.btf != nil { - spec.BTF, err = ec.btf.Program(sec.Name, length) - if err != nil && !errors.Is(err, btf.ErrNoExtendedInfo) { - return nil, fmt.Errorf("program %s: %w", funcSym.Name, err) + // Function names must be unique within a single ELF blob. + if progs[name] != nil { + return nil, fmt.Errorf("duplicate program name %s", name) } - } + progs[name] = spec - if spec.Type == UnspecifiedProgram { - // There is no single name we can use for "library" sections, - // since they may contain multiple functions. We'll decode the - // labels they contain later on, and then link sections that way. - libs = append(libs, spec) - } else { - progs = append(progs, spec) + if spec.SectionName != ".text" { + export = append(export, name) + } } } - res := make(map[string]*ProgramSpec, len(progs)) - for _, prog := range progs { - err := link(prog, libs) - if err != nil { - return nil, fmt.Errorf("program %s: %w", prog.Name, err) + flattenPrograms(progs, export) + + // Hide programs (e.g. library functions) that were not explicitly emitted + // to an ELF section. These could be exposed in a separate CollectionSpec + // field later to allow them to be modified. + for n, p := range progs { + if p.SectionName == ".text" { + delete(progs, n) } - res[prog.Name] = prog } - return res, nil + return progs, nil } -func (ec *elfCode) loadInstructions(section *elfSection) (asm.Instructions, uint64, error) { - var ( - r = bufio.NewReader(section.Open()) - insns asm.Instructions - offset uint64 - ) - for { - var ins asm.Instruction - n, err := ins.Unmarshal(r, ec.ByteOrder) - if err == io.EOF { - return insns, offset, nil - } - if err != nil { - return nil, 0, fmt.Errorf("offset %d: %w", offset, err) - } +// loadFunctions extracts instruction streams from the given program section +// starting at each symbol in the section. The section's symbols must already +// be narrowed down to STT_NOTYPE (emitted by clang <8) or STT_FUNC. +// +// The resulting map is indexed by function name. +func (ec *elfCode) loadFunctions(section *elfSection) (map[string]asm.Instructions, error) { + r := bufio.NewReader(section.Open()) + + // Decode the section's instruction stream. 
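Since loadProgramSections now emits one ProgramSpec per function, the resulting CollectionSpec can be inspected roughly like this:

package main

import (
	"fmt"

	"github.com/cilium/ebpf"
)

// dumpPrograms prints each function-level ProgramSpec; keys are function
// names, and SectionName records the ELF section the function came from.
func dumpPrograms(spec *ebpf.CollectionSpec) {
	for name, p := range spec.Programs {
		fmt.Printf("%s: type=%s section=%s insns=%d\n",
			name, p.Type, p.SectionName, len(p.Instructions))
	}
}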
+ var insns asm.Instructions + if err := insns.Unmarshal(r, ec.ByteOrder); err != nil { + return nil, fmt.Errorf("decoding instructions for section %s: %w", section.Name, err) + } + if len(insns) == 0 { + return nil, fmt.Errorf("no instructions found in section %s", section.Name) + } - ins.Symbol = section.symbols[offset].Name + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + offset := iter.Offset.Bytes() + // Tag Symbol Instructions. + if sym, ok := section.symbols[offset]; ok { + *ins = ins.WithSymbol(sym.Name) + } + + // Apply any relocations for the current instruction. + // If no relocation is present, resolve any section-relative function calls. if rel, ok := section.relocations[offset]; ok { - if err = ec.relocateInstruction(&ins, rel); err != nil { - return nil, 0, fmt.Errorf("offset %d: relocate instruction: %w", offset, err) + if err := ec.relocateInstruction(ins, rel); err != nil { + return nil, fmt.Errorf("offset %d: relocating instruction: %w", offset, err) + } + } else { + if err := referenceRelativeJump(ins, offset, section.symbols); err != nil { + return nil, fmt.Errorf("offset %d: resolving relative jump: %w", offset, err) } } + } + + if ec.extInfo != nil { + ec.extInfo.Assign(insns, section.Name) + } - insns = append(insns, ins) - offset += n + return splitSymbols(insns) +} + +// referenceRelativeJump turns a relative jump to another bpf subprogram within +// the same ELF section into a Reference Instruction. +// +// Up to LLVM 9, calls to subprograms within the same ELF section are sometimes +// encoded using relative jumps instead of relocation entries. These jumps go +// out of bounds of the current program, so their targets must be memoized +// before the section's instruction stream is split. +// +// The relative jump Constant is blinded to -1 and the target Symbol is set as +// the Instruction's Reference so it can be resolved by the linker. +func referenceRelativeJump(ins *asm.Instruction, offset uint64, symbols map[uint64]elf.Symbol) error { + if !ins.IsFunctionReference() || ins.Constant == -1 { + return nil + } + + tgt := jumpTarget(offset, *ins) + sym := symbols[tgt].Name + if sym == "" { + return fmt.Errorf("no jump target found at offset %d", tgt) } + + *ins = ins.WithReference(sym) + ins.Constant = -1 + + return nil +} + +// jumpTarget takes ins' offset within an instruction stream (in bytes) +// and returns its absolute jump destination (in bytes) within the +// instruction stream. +func jumpTarget(offset uint64, ins asm.Instruction) uint64 { + // A relative jump instruction describes the amount of raw BPF instructions + // to jump, convert the offset into bytes. + dest := ins.Constant * asm.InstructionSize + + // The starting point of the jump is the end of the current instruction. + dest += int64(offset + asm.InstructionSize) + + if dest < 0 { + return 0 + } + + return uint64(dest) } func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) error { @@ -367,18 +456,12 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err ins.Src = asm.PseudoMapFD - // Mark the instruction as needing an update when creating the - // collection. 
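A worked example of the jumpTarget arithmetic above, restated locally because the helper is unexported: a call at byte offset 24 with Constant 3 lands at 3*8 + (24+8) = 56 bytes.

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

// relativeJumpTarget mirrors jumpTarget: Constant counts raw BPF instructions,
// and the jump is taken from the end of the current instruction.
func relativeJumpTarget(offset uint64, constant int64) uint64 {
	dest := constant*asm.InstructionSize + int64(offset+asm.InstructionSize)
	if dest < 0 {
		return 0
	}
	return uint64(dest)
}

func main() {
	fmt.Println(relativeJumpTarget(24, 3)) // 56
}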
- if err := ins.RewriteMapPtr(-1); err != nil { - return err - } - case dataSection: var offset uint32 switch typ { case elf.STT_SECTION: if bind != elf.STB_LOCAL { - return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind) + return fmt.Errorf("direct load: %s: unsupported section relocation %s", name, bind) } // This is really a reference to a static symbol, which clang doesn't @@ -387,8 +470,17 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err offset = uint32(uint64(ins.Constant)) case elf.STT_OBJECT: - if bind != elf.STB_GLOBAL { - return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind) + // LLVM 9 emits OBJECT-LOCAL symbols for anonymous constants. + if bind != elf.STB_GLOBAL && bind != elf.STB_LOCAL { + return fmt.Errorf("direct load: %s: unsupported object relocation %s", name, bind) + } + + offset = uint32(rel.Value) + + case elf.STT_NOTYPE: + // LLVM 7 emits NOTYPE-LOCAL symbols for anonymous constants. + if bind != elf.STB_LOCAL { + return fmt.Errorf("direct load: %s: unsupported untyped relocation %s", name, bind) } offset = uint32(rel.Value) @@ -406,51 +498,71 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err ins.Constant = int64(uint64(offset) << 32) ins.Src = asm.PseudoMapValue - // Mark the instruction as needing an update when creating the - // collection. - if err := ins.RewriteMapPtr(-1); err != nil { - return err - } - case programSection: - if ins.OpCode.JumpOp() != asm.Call { - return fmt.Errorf("not a call instruction: %s", ins) - } + switch opCode := ins.OpCode; { + case opCode.JumpOp() == asm.Call: + if ins.Src != asm.PseudoCall { + return fmt.Errorf("call: %s: incorrect source register", name) + } - if ins.Src != asm.PseudoCall { - return fmt.Errorf("call: %s: incorrect source register", name) - } + switch typ { + case elf.STT_NOTYPE, elf.STT_FUNC: + if bind != elf.STB_GLOBAL { + return fmt.Errorf("call: %s: unsupported binding: %s", name, bind) + } - switch typ { - case elf.STT_NOTYPE, elf.STT_FUNC: - if bind != elf.STB_GLOBAL { - return fmt.Errorf("call: %s: unsupported binding: %s", name, bind) - } + case elf.STT_SECTION: + if bind != elf.STB_LOCAL { + return fmt.Errorf("call: %s: unsupported binding: %s", name, bind) + } - case elf.STT_SECTION: - if bind != elf.STB_LOCAL { - return fmt.Errorf("call: %s: unsupported binding: %s", name, bind) + // The function we want to call is in the indicated section, + // at the offset encoded in the instruction itself. Reverse + // the calculation to find the real function we're looking for. + // A value of -1 references the first instruction in the section. + offset := int64(int32(ins.Constant)+1) * asm.InstructionSize + sym, ok := target.symbols[uint64(offset)] + if !ok { + return fmt.Errorf("call: no symbol at offset %d", offset) + } + + name = sym.Name + ins.Constant = -1 + + default: + return fmt.Errorf("call: %s: invalid symbol type %s", name, typ) } + case opCode.IsDWordLoad(): + switch typ { + case elf.STT_FUNC: + if bind != elf.STB_GLOBAL { + return fmt.Errorf("load: %s: unsupported binding: %s", name, bind) + } + + case elf.STT_SECTION: + if bind != elf.STB_LOCAL { + return fmt.Errorf("load: %s: unsupported binding: %s", name, bind) + } + + // ins.Constant already contains the offset in bytes from the + // start of the section. This is different than a call to a + // static function. - // The function we want to call is in the indicated section, - // at the offset encoded in the instruction itself. 
Reverse - // the calculation to find the real function we're looking for. - // A value of -1 references the first instruction in the section. - offset := int64(int32(ins.Constant)+1) * asm.InstructionSize - if offset < 0 { - return fmt.Errorf("call: %s: invalid offset %d", name, offset) + default: + return fmt.Errorf("load: %s: invalid symbol type %s", name, typ) } - sym, ok := target.symbols[uint64(offset)] + sym, ok := target.symbols[uint64(ins.Constant)] if !ok { - return fmt.Errorf("call: %s: no symbol at offset %d", name, offset) + return fmt.Errorf("load: no symbol at offset %d", ins.Constant) } - ins.Constant = -1 name = sym.Name + ins.Constant = -1 + ins.Src = asm.PseudoFunc default: - return fmt.Errorf("call: %s: invalid symbol type %s", name, typ) + return fmt.Errorf("neither a call nor a load instruction: %v", ins) } case undefSection: @@ -468,7 +580,7 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err return fmt.Errorf("relocation to %q: %w", target.Name, ErrNotSupported) } - ins.Reference = name + *ins = ins.WithReference(name) return nil } @@ -520,8 +632,12 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error { return fmt.Errorf("map %s: missing flags", mapName) } - if _, err := io.Copy(internal.DiscardZeroes{}, lr); err != nil { - return fmt.Errorf("map %s: unknown and non-zero fields in definition", mapName) + extra, err := io.ReadAll(lr) + if err != nil { + return fmt.Errorf("map %s: reading map tail: %w", mapName, err) + } + if len(extra) > 0 { + spec.Extra = bytes.NewReader(extra) } if err := spec.clampPerfEventArraySize(); err != nil { @@ -535,6 +651,9 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error { return nil } +// loadBTFMaps iterates over all ELF sections marked as BTF map sections +// (like .maps) and parses them into MapSpecs. Dump the .maps section and +// any relocations with `readelf -x .maps -r `. func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error { for _, sec := range ec.sections { if sec.kind != btfMapSection { @@ -545,33 +664,46 @@ func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error { return fmt.Errorf("missing BTF") } - _, err := io.Copy(internal.DiscardZeroes{}, bufio.NewReader(sec.Open())) - if err != nil { - return fmt.Errorf("section %v: initializing BTF map definitions: %w", sec.Name, internal.ErrNotSupported) - } - - var ds btf.Datasec - if err := ec.btf.FindType(sec.Name, &ds); err != nil { + // Each section must appear as a DataSec in the ELF's BTF blob. + var ds *btf.Datasec + if err := ec.btf.TypeByName(sec.Name, &ds); err != nil { return fmt.Errorf("cannot find section '%s' in BTF: %w", sec.Name, err) } + // Open a Reader to the ELF's raw section bytes so we can assert that all + // of them are zero on a per-map (per-Var) basis. For now, the section's + // sole purpose is to receive relocations, so all must be zero. + rs := sec.Open() + for _, vs := range ds.Vars { + // BPF maps are declared as and assigned to global variables, + // so iterate over each Var in the DataSec and validate their types. v, ok := vs.Type.(*btf.Var) if !ok { return fmt.Errorf("section %v: unexpected type %s", sec.Name, vs.Type) } name := string(v.Name) + // The BTF metadata for each Var contains the full length of the map + // declaration, so read the corresponding amount of bytes from the ELF. + // This way, we can pinpoint which map declaration contains unexpected + // (and therefore unsupported) data. 
+ _, err := io.Copy(internal.DiscardZeroes{}, io.LimitReader(rs, int64(vs.Size))) + if err != nil { + return fmt.Errorf("section %v: map %s: initializing BTF map definitions: %w", sec.Name, name, internal.ErrNotSupported) + } + if maps[name] != nil { return fmt.Errorf("section %v: map %s already exists", sec.Name, name) } + // Each Var representing a BTF map definition contains a Struct. mapStruct, ok := v.Type.(*btf.Struct) if !ok { return fmt.Errorf("expected struct, got %s", v.Type) } - mapSpec, err := mapSpecFromBTF(name, mapStruct, false, ec.btf) + mapSpec, err := mapSpecFromBTF(sec, &vs, mapStruct, ec.btf, name, false) if err != nil { return fmt.Errorf("map %v: %w", name, err) } @@ -582,6 +714,16 @@ func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error { maps[name] = mapSpec } + + // Drain the ELF section reader to make sure all bytes are accounted for + // with BTF metadata. + i, err := io.Copy(io.Discard, rs) + if err != nil { + return fmt.Errorf("section %v: unexpected error reading remainder of ELF section: %w", sec.Name, err) + } + if i > 0 { + return fmt.Errorf("section %v: %d unexpected remaining bytes in ELF section, invalid BTF?", sec.Name, i) + } } return nil @@ -590,24 +732,26 @@ func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error { // mapSpecFromBTF produces a MapSpec based on a btf.Struct def representing // a BTF map definition. The name and spec arguments will be copied to the // resulting MapSpec, and inner must be true on any resursive invocations. -func mapSpecFromBTF(name string, def *btf.Struct, inner bool, spec *btf.Spec) (*MapSpec, error) { - +func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *btf.Spec, name string, inner bool) (*MapSpec, error) { var ( - key, value btf.Type - keySize, valueSize uint32 - mapType, flags, maxEntries uint32 - pinType PinType - innerMapSpec *MapSpec - err error + key, value btf.Type + keySize, valueSize uint32 + mapType MapType + flags, maxEntries uint32 + pinType PinType + innerMapSpec *MapSpec + contents []MapKV + err error ) for i, member := range def.Members { switch member.Name { case "type": - mapType, err = uintFromBTF(member.Type) + mt, err := uintFromBTF(member.Type) if err != nil { return nil, fmt.Errorf("can't get type: %w", err) } + mapType = MapType(mt) case "map_flags": flags, err = uintFromBTF(member.Type) @@ -717,7 +861,7 @@ func mapSpecFromBTF(name string, def *btf.Struct, inner bool, spec *btf.Spec) (* case *btf.Struct: // The values member pointing to an array of structs means we're expecting // a map-in-map declaration. - if MapType(mapType) != ArrayOfMaps && MapType(mapType) != HashOfMaps { + if mapType != ArrayOfMaps && mapType != HashOfMaps { return nil, errors.New("outer map needs to be an array or a hash of maps") } if inner { @@ -731,21 +875,38 @@ func mapSpecFromBTF(name string, def *btf.Struct, inner bool, spec *btf.Spec) (* // on kernels 5.2 and up) // Pass the BTF spec from the parent object, since both parent and // child must be created from the same BTF blob (on kernels that support BTF). - innerMapSpec, err = mapSpecFromBTF(name+"_inner", t, true, spec) + innerMapSpec, err = mapSpecFromBTF(es, vs, t, spec, name+"_inner", true) if err != nil { return nil, fmt.Errorf("can't parse BTF map definition of inner map: %w", err) } + case *btf.FuncProto: + // The values member contains an array of function pointers, meaning an + // autopopulated PROG_ARRAY. 
+ if mapType != ProgramArray { + return nil, errors.New("map needs to be a program array") + } + default: return nil, fmt.Errorf("unsupported value type %q in 'values' field", t) } + contents, err = resolveBTFValuesContents(es, vs, member) + if err != nil { + return nil, fmt.Errorf("resolving values contents: %w", err) + } + default: return nil, fmt.Errorf("unrecognized field %s in BTF map definition", member.Name) } } - bm := btf.NewMap(spec, key, value) + if key == nil { + key = &btf.Void{} + } + if value == nil { + value = &btf.Void{} + } return &MapSpec{ Name: SanitizeName(name, -1), @@ -754,9 +915,12 @@ func mapSpecFromBTF(name string, def *btf.Struct, inner bool, spec *btf.Spec) (* ValueSize: valueSize, MaxEntries: maxEntries, Flags: flags, - BTF: &bm, + Key: key, + Value: value, + BTF: spec, Pinning: pinType, InnerMap: innerMapSpec, + Contents: contents, }, nil } @@ -793,6 +957,64 @@ func resolveBTFArrayMacro(typ btf.Type) (btf.Type, error) { return ptr.Target, nil } +// resolveBTFValuesContents resolves relocations into ELF sections belonging +// to btf.VarSecinfo's. This can be used on the 'values' member in BTF map +// definitions to extract static declarations of map contents. +func resolveBTFValuesContents(es *elfSection, vs *btf.VarSecinfo, member btf.Member) ([]MapKV, error) { + // The elements of a .values pointer array are not encoded in BTF. + // Instead, relocations are generated into each array index. + // However, it's possible to leave certain array indices empty, so all + // indices' offsets need to be checked for emitted relocations. + + // The offset of the 'values' member within the _struct_ (in bits) + // is the starting point of the array. Convert to bytes. Add VarSecinfo + // offset to get the absolute position in the ELF blob. + start := member.Offset.Bytes() + vs.Offset + // 'values' is encoded in BTF as a zero (variable) length struct + // member, and its contents run until the end of the VarSecinfo. + // Add VarSecinfo offset to get the absolute position in the ELF blob. + end := vs.Size + vs.Offset + // The size of an address in this section. This determines the width of + // an index in the array. + align := uint32(es.SectionHeader.Addralign) + + // Check if variable-length section is aligned. + if (end-start)%align != 0 { + return nil, errors.New("unaligned static values section") + } + elems := (end - start) / align + + if elems == 0 { + return nil, nil + } + + contents := make([]MapKV, 0, elems) + + // k is the array index, off is its corresponding ELF section offset. + for k, off := uint32(0), start; k < elems; k, off = k+1, off+align { + r, ok := es.relocations[uint64(off)] + if !ok { + continue + } + + // Relocation exists for the current offset in the ELF section. + // Emit a value stub based on the type of relocation to be replaced by + // a real fd later in the pipeline before populating the map. + // Map keys are encoded in MapKV entries, so empty array indices are + // skipped here. 
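The map-in-map branch above derives an InnerMap template from BTF; the equivalent can be expressed directly in Go, sketched here with placeholder names and sizes:

package main

import "github.com/cilium/ebpf"

// outerSpec declares an ArrayOfMaps whose slots hold maps shaped like inner,
// mirroring what mapSpecFromBTF derives from a nested BTF map definition.
func outerSpec() *ebpf.MapSpec {
	inner := &ebpf.MapSpec{
		Name:       "inner",
		Type:       ebpf.Hash,
		KeySize:    4,
		ValueSize:  8,
		MaxEntries: 64,
	}
	return &ebpf.MapSpec{
		Name:       "outer",
		Type:       ebpf.ArrayOfMaps,
		KeySize:    4,
		ValueSize:  4, // slots hold map fds/ids
		MaxEntries: 8,
		InnerMap:   inner,
	}
}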
+ switch t := elf.ST_TYPE(r.Info); t { + case elf.STT_FUNC: + contents = append(contents, MapKV{uint32(k), r.Name}) + case elf.STT_OBJECT: + contents = append(contents, MapKV{uint32(k), r.Name}) + default: + return nil, fmt.Errorf("unknown relocation type %v", t) + } + } + + return contents, nil +} + func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error { for _, sec := range ec.sections { if sec.kind != dataSection { @@ -805,15 +1027,6 @@ func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error { continue } - if ec.btf == nil { - return errors.New("data sections require BTF, make sure all consts are marked as static") - } - - btfMap, err := ec.btf.Datasec(sec.Name) - if err != nil { - return err - } - data, err := sec.Data() if err != nil { return fmt.Errorf("data section %s: can't get contents: %w", sec.Name, err) @@ -830,14 +1043,25 @@ func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error { ValueSize: uint32(len(data)), MaxEntries: 1, Contents: []MapKV{{uint32(0), data}}, - BTF: btfMap, } - switch sec.Name { - case ".rodata": + // It is possible for a data section to exist without a corresponding BTF Datasec + // if it only contains anonymous values like macro-defined arrays. + if ec.btf != nil { + var ds *btf.Datasec + if ec.btf.TypeByName(sec.Name, &ds) == nil { + // Assign the spec's key and BTF only if the Datasec lookup was successful. + mapSpec.BTF = ec.btf + mapSpec.Key = &btf.Void{} + mapSpec.Value = ds + } + } + + switch n := sec.Name; { + case strings.HasPrefix(n, ".rodata"): mapSpec.Flags = unix.BPF_F_RDONLY_PROG mapSpec.Freeze = true - case ".bss": + case n == ".bss": // The kernel already zero-initializes the map mapSpec.Contents = nil } @@ -848,83 +1072,103 @@ func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error { } func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) { - types := map[string]struct { + types := []struct { + prefix string progType ProgramType attachType AttachType progFlags uint32 }{ - // From https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c - "socket": {SocketFilter, AttachNone, 0}, - "seccomp": {SocketFilter, AttachNone, 0}, - "kprobe/": {Kprobe, AttachNone, 0}, - "uprobe/": {Kprobe, AttachNone, 0}, - "kretprobe/": {Kprobe, AttachNone, 0}, - "uretprobe/": {Kprobe, AttachNone, 0}, - "tracepoint/": {TracePoint, AttachNone, 0}, - "raw_tracepoint/": {RawTracepoint, AttachNone, 0}, - "raw_tp/": {RawTracepoint, AttachNone, 0}, - "tp_btf/": {Tracing, AttachTraceRawTp, 0}, - "xdp": {XDP, AttachNone, 0}, - "perf_event": {PerfEvent, AttachNone, 0}, - "lwt_in": {LWTIn, AttachNone, 0}, - "lwt_out": {LWTOut, AttachNone, 0}, - "lwt_xmit": {LWTXmit, AttachNone, 0}, - "lwt_seg6local": {LWTSeg6Local, AttachNone, 0}, - "sockops": {SockOps, AttachCGroupSockOps, 0}, - "sk_skb/stream_parser": {SkSKB, AttachSkSKBStreamParser, 0}, - "sk_skb/stream_verdict": {SkSKB, AttachSkSKBStreamParser, 0}, - "sk_msg": {SkMsg, AttachSkSKBStreamVerdict, 0}, - "lirc_mode2": {LircMode2, AttachLircMode2, 0}, - "flow_dissector": {FlowDissector, AttachFlowDissector, 0}, - "iter/": {Tracing, AttachTraceIter, 0}, - "fentry/": {Tracing, AttachTraceFEntry, 0}, - "fmod_ret/": {Tracing, AttachModifyReturn, 0}, - "fexit/": {Tracing, AttachTraceFExit, 0}, - "fentry.s/": {Tracing, AttachTraceFEntry, unix.BPF_F_SLEEPABLE}, - "fmod_ret.s/": {Tracing, AttachModifyReturn, unix.BPF_F_SLEEPABLE}, - "fexit.s/": {Tracing, AttachTraceFExit, unix.BPF_F_SLEEPABLE}, - "sk_lookup/": {SkLookup, 
AttachSkLookup, 0}, - "lsm/": {LSM, AttachLSMMac, 0}, - "lsm.s/": {LSM, AttachLSMMac, unix.BPF_F_SLEEPABLE}, - - "cgroup_skb/ingress": {CGroupSKB, AttachCGroupInetIngress, 0}, - "cgroup_skb/egress": {CGroupSKB, AttachCGroupInetEgress, 0}, - "cgroup/dev": {CGroupDevice, AttachCGroupDevice, 0}, - "cgroup/skb": {CGroupSKB, AttachNone, 0}, - "cgroup/sock": {CGroupSock, AttachCGroupInetSockCreate, 0}, - "cgroup/post_bind4": {CGroupSock, AttachCGroupInet4PostBind, 0}, - "cgroup/post_bind6": {CGroupSock, AttachCGroupInet6PostBind, 0}, - "cgroup/bind4": {CGroupSockAddr, AttachCGroupInet4Bind, 0}, - "cgroup/bind6": {CGroupSockAddr, AttachCGroupInet6Bind, 0}, - "cgroup/connect4": {CGroupSockAddr, AttachCGroupInet4Connect, 0}, - "cgroup/connect6": {CGroupSockAddr, AttachCGroupInet6Connect, 0}, - "cgroup/sendmsg4": {CGroupSockAddr, AttachCGroupUDP4Sendmsg, 0}, - "cgroup/sendmsg6": {CGroupSockAddr, AttachCGroupUDP6Sendmsg, 0}, - "cgroup/recvmsg4": {CGroupSockAddr, AttachCGroupUDP4Recvmsg, 0}, - "cgroup/recvmsg6": {CGroupSockAddr, AttachCGroupUDP6Recvmsg, 0}, - "cgroup/sysctl": {CGroupSysctl, AttachCGroupSysctl, 0}, - "cgroup/getsockopt": {CGroupSockopt, AttachCGroupGetsockopt, 0}, - "cgroup/setsockopt": {CGroupSockopt, AttachCGroupSetsockopt, 0}, - "classifier": {SchedCLS, AttachNone, 0}, - "action": {SchedACT, AttachNone, 0}, - } - - for prefix, t := range types { - if !strings.HasPrefix(sectionName, prefix) { + // Please update the types from libbpf.c and follow the order of it. + // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c + {"socket", SocketFilter, AttachNone, 0}, + {"sk_reuseport/migrate", SkReuseport, AttachSkReuseportSelectOrMigrate, 0}, + {"sk_reuseport", SkReuseport, AttachSkReuseportSelect, 0}, + {"kprobe/", Kprobe, AttachNone, 0}, + {"uprobe/", Kprobe, AttachNone, 0}, + {"kretprobe/", Kprobe, AttachNone, 0}, + {"uretprobe/", Kprobe, AttachNone, 0}, + {"tc", SchedCLS, AttachNone, 0}, + {"classifier", SchedCLS, AttachNone, 0}, + {"action", SchedACT, AttachNone, 0}, + {"tracepoint/", TracePoint, AttachNone, 0}, + {"tp/", TracePoint, AttachNone, 0}, + {"raw_tracepoint/", RawTracepoint, AttachNone, 0}, + {"raw_tp/", RawTracepoint, AttachNone, 0}, + {"raw_tracepoint.w/", RawTracepointWritable, AttachNone, 0}, + {"raw_tp.w/", RawTracepointWritable, AttachNone, 0}, + {"tp_btf/", Tracing, AttachTraceRawTp, 0}, + {"fentry/", Tracing, AttachTraceFEntry, 0}, + {"fmod_ret/", Tracing, AttachModifyReturn, 0}, + {"fexit/", Tracing, AttachTraceFExit, 0}, + {"fentry.s/", Tracing, AttachTraceFEntry, unix.BPF_F_SLEEPABLE}, + {"fmod_ret.s/", Tracing, AttachModifyReturn, unix.BPF_F_SLEEPABLE}, + {"fexit.s/", Tracing, AttachTraceFExit, unix.BPF_F_SLEEPABLE}, + {"freplace/", Extension, AttachNone, 0}, + {"lsm/", LSM, AttachLSMMac, 0}, + {"lsm.s/", LSM, AttachLSMMac, unix.BPF_F_SLEEPABLE}, + {"iter/", Tracing, AttachTraceIter, 0}, + {"syscall", Syscall, AttachNone, 0}, + {"xdp_devmap/", XDP, AttachXDPDevMap, 0}, + {"xdp_cpumap/", XDP, AttachXDPCPUMap, 0}, + {"xdp", XDP, AttachNone, 0}, + {"perf_event", PerfEvent, AttachNone, 0}, + {"lwt_in", LWTIn, AttachNone, 0}, + {"lwt_out", LWTOut, AttachNone, 0}, + {"lwt_xmit", LWTXmit, AttachNone, 0}, + {"lwt_seg6local", LWTSeg6Local, AttachNone, 0}, + {"cgroup_skb/ingress", CGroupSKB, AttachCGroupInetIngress, 0}, + {"cgroup_skb/egress", CGroupSKB, AttachCGroupInetEgress, 0}, + {"cgroup/skb", CGroupSKB, AttachNone, 0}, + {"cgroup/sock_create", CGroupSock, AttachCGroupInetSockCreate, 0}, + {"cgroup/sock_release", CGroupSock, 
AttachCgroupInetSockRelease, 0}, + {"cgroup/sock", CGroupSock, AttachCGroupInetSockCreate, 0}, + {"cgroup/post_bind4", CGroupSock, AttachCGroupInet4PostBind, 0}, + {"cgroup/post_bind6", CGroupSock, AttachCGroupInet6PostBind, 0}, + {"cgroup/dev", CGroupDevice, AttachCGroupDevice, 0}, + {"sockops", SockOps, AttachCGroupSockOps, 0}, + {"sk_skb/stream_parser", SkSKB, AttachSkSKBStreamParser, 0}, + {"sk_skb/stream_verdict", SkSKB, AttachSkSKBStreamVerdict, 0}, + {"sk_skb", SkSKB, AttachNone, 0}, + {"sk_msg", SkMsg, AttachSkMsgVerdict, 0}, + {"lirc_mode2", LircMode2, AttachLircMode2, 0}, + {"flow_dissector", FlowDissector, AttachFlowDissector, 0}, + {"cgroup/bind4", CGroupSockAddr, AttachCGroupInet4Bind, 0}, + {"cgroup/bind6", CGroupSockAddr, AttachCGroupInet6Bind, 0}, + {"cgroup/connect4", CGroupSockAddr, AttachCGroupInet4Connect, 0}, + {"cgroup/connect6", CGroupSockAddr, AttachCGroupInet6Connect, 0}, + {"cgroup/sendmsg4", CGroupSockAddr, AttachCGroupUDP4Sendmsg, 0}, + {"cgroup/sendmsg6", CGroupSockAddr, AttachCGroupUDP6Sendmsg, 0}, + {"cgroup/recvmsg4", CGroupSockAddr, AttachCGroupUDP4Recvmsg, 0}, + {"cgroup/recvmsg6", CGroupSockAddr, AttachCGroupUDP6Recvmsg, 0}, + {"cgroup/getpeername4", CGroupSockAddr, AttachCgroupInet4GetPeername, 0}, + {"cgroup/getpeername6", CGroupSockAddr, AttachCgroupInet6GetPeername, 0}, + {"cgroup/getsockname4", CGroupSockAddr, AttachCgroupInet4GetSockname, 0}, + {"cgroup/getsockname6", CGroupSockAddr, AttachCgroupInet6GetSockname, 0}, + {"cgroup/sysctl", CGroupSysctl, AttachCGroupSysctl, 0}, + {"cgroup/getsockopt", CGroupSockopt, AttachCGroupGetsockopt, 0}, + {"cgroup/setsockopt", CGroupSockopt, AttachCGroupSetsockopt, 0}, + {"struct_ops+", StructOps, AttachNone, 0}, + {"sk_lookup/", SkLookup, AttachSkLookup, 0}, + + {"seccomp", SocketFilter, AttachNone, 0}, + } + + for _, t := range types { + if !strings.HasPrefix(sectionName, t.prefix) { continue } - if !strings.HasSuffix(prefix, "/") { + if !strings.HasSuffix(t.prefix, "/") { return t.progType, t.attachType, t.progFlags, "" } - return t.progType, t.attachType, t.progFlags, sectionName[len(prefix):] + return t.progType, t.attachType, t.progFlags, sectionName[len(t.prefix):] } return UnspecifiedProgram, AttachNone, 0, "" } -func (ec *elfCode) loadRelocations(sec *elf.Section, symbols []elf.Symbol) (map[uint64]elf.Symbol, error) { +func (ec *elfCode) loadSectionRelocations(sec *elf.Section, symbols []elf.Symbol) (map[uint64]elf.Symbol, error) { rels := make(map[uint64]elf.Symbol) if sec.Entsize < 16 { diff --git a/agent/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go b/agent/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go deleted file mode 100644 index d46d135f2fc..00000000000 --- a/agent/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build gofuzz - -// Use with https://github.com/dvyukov/go-fuzz - -package ebpf - -import "bytes" - -func FuzzLoadCollectionSpec(data []byte) int { - spec, err := LoadCollectionSpecFromReader(bytes.NewReader(data)) - if err != nil { - if spec != nil { - panic("spec is not nil") - } - return 0 - } - if spec == nil { - panic("spec is nil") - } - return 1 -} diff --git a/agent/vendor/github.com/cilium/ebpf/info.go b/agent/vendor/github.com/cilium/ebpf/info.go index b95131ef572..ae77bc6197f 100644 --- a/agent/vendor/github.com/cilium/ebpf/info.go +++ b/agent/vendor/github.com/cilium/ebpf/info.go @@ -2,6 +2,7 @@ package ebpf import ( "bufio" + "bytes" "encoding/hex" "errors" "fmt" @@ -10,8 +11,13 @@ import ( "strings" "syscall" "time" + "unsafe" + 
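The old map has become an ordered slice because prefix order now matters: more specific prefixes such as "sk_reuseport/migrate" must be tried before "sk_reuseport". A small local sketch of the first-match rule and of the attach-to suffix for prefixes ending in '/':

package main

import (
	"fmt"
	"strings"
)

// entry is a trimmed-down stand-in for the section-prefix table above.
type entry struct {
	prefix   string
	progType string
}

var table = []entry{
	{"sk_reuseport/migrate", "SkReuseport (select or migrate)"},
	{"sk_reuseport", "SkReuseport (select)"},
	{"kprobe/", "Kprobe"},
}

// match returns the first matching entry; for prefixes ending in "/" the
// remainder of the section name becomes the attach-to target.
func match(section string) (progType, attachTo string) {
	for _, e := range table {
		if !strings.HasPrefix(section, e.prefix) {
			continue
		}
		if strings.HasSuffix(e.prefix, "/") {
			return e.progType, section[len(e.prefix):]
		}
		return e.progType, ""
	}
	return "Unspecified", ""
}

func main() {
	fmt.Println(match("kprobe/sys_execve"))    // Kprobe sys_execve
	fmt.Println(match("sk_reuseport/migrate")) // SkReuseport (select or migrate)
}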
"github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" ) // MapInfo describes a map. @@ -22,12 +28,13 @@ type MapInfo struct { ValueSize uint32 MaxEntries uint32 Flags uint32 - // Name as supplied by user space at load time. + // Name as supplied by user space at load time. Available from 4.15. Name string } -func newMapInfoFromFd(fd *internal.FD) (*MapInfo, error) { - info, err := bpfGetMapInfoByFD(fd) +func newMapInfoFromFd(fd *sys.FD) (*MapInfo, error) { + var info sys.MapInfo + err := sys.ObjInfo(fd, &info) if errors.Is(err, syscall.EINVAL) { return newMapInfoFromProc(fd) } @@ -36,18 +43,17 @@ func newMapInfoFromFd(fd *internal.FD) (*MapInfo, error) { } return &MapInfo{ - MapType(info.map_type), - MapID(info.id), - info.key_size, - info.value_size, - info.max_entries, - info.map_flags, - // name is available from 4.15. - internal.CString(info.name[:]), + MapType(info.Type), + MapID(info.Id), + info.KeySize, + info.ValueSize, + info.MaxEntries, + info.MapFlags, + unix.ByteSliceToString(info.Name[:]), }, nil } -func newMapInfoFromProc(fd *internal.FD) (*MapInfo, error) { +func newMapInfoFromProc(fd *sys.FD) (*MapInfo, error) { var mi MapInfo err := scanFdInfo(fd, map[string]interface{}{ "map_type": &mi.Type, @@ -83,16 +89,21 @@ type programStats struct { type ProgramInfo struct { Type ProgramType id ProgramID - // Truncated hash of the BPF bytecode. + // Truncated hash of the BPF bytecode. Available from 4.13. Tag string - // Name as supplied by user space at load time. + // Name as supplied by user space at load time. Available from 4.15. Name string + btf btf.ID stats *programStats + + maps []MapID + insns []byte } -func newProgramInfoFromFd(fd *internal.FD) (*ProgramInfo, error) { - info, err := bpfGetProgInfoByFD(fd) +func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) { + var info sys.ProgInfo + err := sys.ObjInfo(fd, &info) if errors.Is(err, syscall.EINVAL) { return newProgramInfoFromProc(fd) } @@ -100,21 +111,43 @@ func newProgramInfoFromFd(fd *internal.FD) (*ProgramInfo, error) { return nil, err } - return &ProgramInfo{ - Type: ProgramType(info.prog_type), - id: ProgramID(info.id), - // tag is available if the kernel supports BPF_PROG_GET_INFO_BY_FD. - Tag: hex.EncodeToString(info.tag[:]), - // name is available from 4.15. - Name: internal.CString(info.name[:]), + pi := ProgramInfo{ + Type: ProgramType(info.Type), + id: ProgramID(info.Id), + Tag: hex.EncodeToString(info.Tag[:]), + Name: unix.ByteSliceToString(info.Name[:]), + btf: btf.ID(info.BtfId), stats: &programStats{ - runtime: time.Duration(info.run_time_ns), - runCount: info.run_cnt, + runtime: time.Duration(info.RunTimeNs), + runCount: info.RunCnt, }, - }, nil + } + + // Start with a clean struct for the second call, otherwise we may get EFAULT. 
+ var info2 sys.ProgInfo + + if info.NrMapIds > 0 { + pi.maps = make([]MapID, info.NrMapIds) + info2.NrMapIds = info.NrMapIds + info2.MapIds = sys.NewPointer(unsafe.Pointer(&pi.maps[0])) + } + + if info.XlatedProgLen > 0 { + pi.insns = make([]byte, info.XlatedProgLen) + info2.XlatedProgLen = info.XlatedProgLen + info2.XlatedProgInsns = sys.NewSlicePointer(pi.insns) + } + + if info.NrMapIds > 0 || info.XlatedProgLen > 0 { + if err := sys.ObjInfo(fd, &info2); err != nil { + return nil, err + } + } + + return &pi, nil } -func newProgramInfoFromProc(fd *internal.FD) (*ProgramInfo, error) { +func newProgramInfoFromProc(fd *sys.FD) (*ProgramInfo, error) { var info ProgramInfo err := scanFdInfo(fd, map[string]interface{}{ "prog_type": &info.Type, @@ -142,6 +175,18 @@ func (pi *ProgramInfo) ID() (ProgramID, bool) { return pi.id, pi.id > 0 } +// BTFID returns the BTF ID associated with the program. +// +// The ID is only valid as long as the associated program is kept alive. +// Available from 5.0. +// +// The bool return value indicates whether this optional field is available and +// populated. (The field may be available but not populated if the kernel +// supports the field but the program was loaded without BTF information.) +func (pi *ProgramInfo) BTFID() (btf.ID, bool) { + return pi.btf, pi.btf > 0 +} + // RunCount returns the total number of times the program was called. // // Can return 0 if the collection of statistics is not enabled. See EnableStats(). @@ -164,13 +209,50 @@ func (pi *ProgramInfo) Runtime() (time.Duration, bool) { return time.Duration(0), false } -func scanFdInfo(fd *internal.FD, fields map[string]interface{}) error { - raw, err := fd.Value() - if err != nil { - return err +// Instructions returns the 'xlated' instruction stream of the program +// after it has been verified and rewritten by the kernel. These instructions +// cannot be loaded back into the kernel as-is, this is mainly used for +// inspecting loaded programs for troubleshooting, dumping, etc. +// +// For example, map accesses are made to reference their kernel map IDs, +// not the FDs they had when the program was inserted. Note that before +// the introduction of bpf_insn_prepare_dump in kernel 4.16, xlated +// instructions were not sanitized, making the output even less reusable +// and less likely to round-trip or evaluate to the same program Tag. +// +// The first instruction is marked as a symbol using the Program's name. +// +// Available from 4.13. Requires CAP_BPF or equivalent. +func (pi *ProgramInfo) Instructions() (asm.Instructions, error) { + // If the calling process is not BPF-capable or if the kernel doesn't + // support getting xlated instructions, the field will be zero. + if len(pi.insns) == 0 { + return nil, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported) + } + + r := bytes.NewReader(pi.insns) + var insns asm.Instructions + if err := insns.Unmarshal(r, internal.NativeEndian); err != nil { + return nil, fmt.Errorf("unmarshaling instructions: %w", err) } - fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", raw)) + // Tag the first instruction with the name of the program, if available. + insns[0] = insns[0].WithSymbol(pi.Name) + + return insns, nil +} + +// MapIDs returns the maps related to the program. +// +// Available from 4.15. +// +// The bool return value indicates whether this optional field is available. 
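The new ProgramInfo fields are reached through Program.Info(); a rough sketch of dumping the BTF ID and the xlated instruction count (both need a sufficiently recent kernel and CAP_BPF or equivalent):

package main

import (
	"fmt"

	"github.com/cilium/ebpf"
)

// inspectProgram prints optional metadata for a loaded program.
func inspectProgram(p *ebpf.Program) error {
	info, err := p.Info()
	if err != nil {
		return err
	}

	if id, ok := info.BTFID(); ok {
		fmt.Println("btf id:", id)
	}

	insns, err := info.Instructions()
	if err != nil {
		// Unprivileged callers or old kernels end up here.
		fmt.Println("xlated instructions unavailable:", err)
		return nil
	}
	fmt.Printf("%d xlated instructions\n", len(insns))
	return nil
}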
+func (pi *ProgramInfo) MapIDs() ([]MapID, bool) { + return pi.maps, pi.maps != nil +} + +func scanFdInfo(fd *sys.FD, fields map[string]interface{}) error { + fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", fd.Int())) if err != nil { return err } @@ -213,6 +295,10 @@ func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error { return err } + if len(fields) > 0 && scanned == 0 { + return ErrNotSupported + } + if scanned != len(fields) { return errMissingFields } @@ -227,11 +313,9 @@ func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error { // // Requires at least 5.8. func EnableStats(which uint32) (io.Closer, error) { - attr := internal.BPFEnableStatsAttr{ - StatsType: which, - } - - fd, err := internal.BPFEnableStats(&attr) + fd, err := sys.EnableStats(&sys.EnableStatsAttr{ + Type: which, + }) if err != nil { return nil, err } diff --git a/agent/vendor/github.com/cilium/ebpf/internal/align.go b/agent/vendor/github.com/cilium/ebpf/internal/align.go new file mode 100644 index 00000000000..8b4f2658eac --- /dev/null +++ b/agent/vendor/github.com/cilium/ebpf/internal/align.go @@ -0,0 +1,6 @@ +package internal + +// Align returns 'n' updated to 'alignment' boundary. +func Align(n, alignment int) int { + return (int(n) + alignment - 1) / alignment * alignment +} diff --git a/agent/vendor/github.com/cilium/ebpf/internal/btf/btf.go b/agent/vendor/github.com/cilium/ebpf/internal/btf/btf.go deleted file mode 100644 index 5da9e11921a..00000000000 --- a/agent/vendor/github.com/cilium/ebpf/internal/btf/btf.go +++ /dev/null @@ -1,799 +0,0 @@ -package btf - -import ( - "bytes" - "debug/elf" - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "math" - "os" - "reflect" - "sync" - "unsafe" - - "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/unix" -) - -const btfMagic = 0xeB9F - -// Errors returned by BTF functions. -var ( - ErrNotSupported = internal.ErrNotSupported - ErrNotFound = errors.New("not found") - ErrNoExtendedInfo = errors.New("no extended info") -) - -// Spec represents decoded BTF. -type Spec struct { - rawTypes []rawType - strings stringTable - types []Type - namedTypes map[string][]namedType - funcInfos map[string]extInfo - lineInfos map[string]extInfo - coreRelos map[string]coreRelos - byteOrder binary.ByteOrder -} - -type btfHeader struct { - Magic uint16 - Version uint8 - Flags uint8 - HdrLen uint32 - - TypeOff uint32 - TypeLen uint32 - StringOff uint32 - StringLen uint32 -} - -// LoadSpecFromReader reads BTF sections from an ELF. -// -// Returns ErrNotFound if the reader contains no BTF. 
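// A short, hedged sketch of the statistics plumbing changed above:
// EnableStats turns on run-time accounting globally, after which the
// counters surface through ProgramInfo. Assumptions: prog is a loaded
// *ebpf.Program, RunCount returns (uint64, bool) like Runtime, and the
// literal 0 stands for the kernel's BPF_STATS_RUN_TIME.
package example

import (
	"fmt"

	"github.com/cilium/ebpf"
)

func sampleStats(prog *ebpf.Program) error {
	closer, err := ebpf.EnableStats(0) // BPF_STATS_RUN_TIME; requires >= 5.8
	if err != nil {
		return err
	}
	defer closer.Close()

	info, err := prog.Info()
	if err != nil {
		return err
	}
	if d, ok := info.Runtime(); ok {
		fmt.Println("total runtime:", d)
	}
	if n, ok := info.RunCount(); ok {
		fmt.Println("run count:", n)
	}
	return nil
}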
-func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) { - file, err := internal.NewSafeELFFile(rd) - if err != nil { - return nil, err - } - defer file.Close() - - btfSection, btfExtSection, sectionSizes, err := findBtfSections(file) - if err != nil { - return nil, err - } - - if btfSection == nil { - return nil, fmt.Errorf("btf: %w", ErrNotFound) - } - - symbols, err := file.Symbols() - if err != nil { - return nil, fmt.Errorf("can't read symbols: %v", err) - } - - variableOffsets := make(map[variable]uint32) - for _, symbol := range symbols { - if idx := symbol.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE { - // Ignore things like SHN_ABS - continue - } - - if int(symbol.Section) >= len(file.Sections) { - return nil, fmt.Errorf("symbol %s: invalid section %d", symbol.Name, symbol.Section) - } - - secName := file.Sections[symbol.Section].Name - if _, ok := sectionSizes[secName]; !ok { - continue - } - - if symbol.Value > math.MaxUint32 { - return nil, fmt.Errorf("section %s: symbol %s: size exceeds maximum", secName, symbol.Name) - } - - variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value) - } - - spec, err := loadNakedSpec(btfSection.Open(), file.ByteOrder, sectionSizes, variableOffsets) - if err != nil { - return nil, err - } - - if btfExtSection == nil { - return spec, nil - } - - spec.funcInfos, spec.lineInfos, spec.coreRelos, err = parseExtInfos(btfExtSection.Open(), file.ByteOrder, spec.strings) - if err != nil { - return nil, fmt.Errorf("can't read ext info: %w", err) - } - - return spec, nil -} - -func findBtfSections(file *internal.SafeELFFile) (*elf.Section, *elf.Section, map[string]uint32, error) { - var ( - btfSection *elf.Section - btfExtSection *elf.Section - sectionSizes = make(map[string]uint32) - ) - - for _, sec := range file.Sections { - switch sec.Name { - case ".BTF": - btfSection = sec - case ".BTF.ext": - btfExtSection = sec - default: - if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS { - break - } - - if sec.Size > math.MaxUint32 { - return nil, nil, nil, fmt.Errorf("section %s exceeds maximum size", sec.Name) - } - - sectionSizes[sec.Name] = uint32(sec.Size) - } - } - return btfSection, btfExtSection, sectionSizes, nil -} - -func loadSpecFromVmlinux(rd io.ReaderAt) (*Spec, error) { - file, err := internal.NewSafeELFFile(rd) - if err != nil { - return nil, err - } - defer file.Close() - - btfSection, _, _, err := findBtfSections(file) - if err != nil { - return nil, fmt.Errorf(".BTF ELF section: %s", err) - } - if btfSection == nil { - return nil, fmt.Errorf("unable to find .BTF ELF section") - } - return loadNakedSpec(btfSection.Open(), file.ByteOrder, nil, nil) -} - -func loadNakedSpec(btf io.ReadSeeker, bo binary.ByteOrder, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) (*Spec, error) { - rawTypes, rawStrings, err := parseBTF(btf, bo) - if err != nil { - return nil, err - } - - err = fixupDatasec(rawTypes, rawStrings, sectionSizes, variableOffsets) - if err != nil { - return nil, err - } - - types, typesByName, err := inflateRawTypes(rawTypes, rawStrings) - if err != nil { - return nil, err - } - - return &Spec{ - rawTypes: rawTypes, - namedTypes: typesByName, - types: types, - strings: rawStrings, - byteOrder: bo, - }, nil -} - -var kernelBTF struct { - sync.Mutex - *Spec -} - -// LoadKernelSpec returns the current kernel's BTF information. -// -// Requires a >= 5.5 kernel with CONFIG_DEBUG_INFO_BTF enabled. Returns -// ErrNotSupported if BTF is not enabled. 
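// Hedged sketch of the replacement flow for the internal btf package removed
// below: this patch vendors a public github.com/cilium/ebpf/btf package (see
// the added btf/btf.go), and loading the kernel's BTF moves there. TypeByName
// is assumed to be the public counterpart of the removed FindType helper.
package example

import (
	"fmt"

	"github.com/cilium/ebpf/btf"
)

func kernelTaskStruct() (*btf.Struct, error) {
	// Reads /sys/kernel/btf/vmlinux or falls back to an on-disk vmlinux,
	// much like the removed loadKernelSpec below.
	spec, err := btf.LoadKernelSpec()
	if err != nil {
		return nil, fmt.Errorf("load kernel BTF: %w", err)
	}

	var task *btf.Struct
	if err := spec.TypeByName("task_struct", &task); err != nil {
		return nil, err
	}
	return task, nil
}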
-func LoadKernelSpec() (*Spec, error) { - kernelBTF.Lock() - defer kernelBTF.Unlock() - - if kernelBTF.Spec != nil { - return kernelBTF.Spec, nil - } - - var err error - kernelBTF.Spec, err = loadKernelSpec() - return kernelBTF.Spec, err -} - -func loadKernelSpec() (*Spec, error) { - release, err := unix.KernelRelease() - if err != nil { - return nil, fmt.Errorf("can't read kernel release number: %w", err) - } - - fh, err := os.Open("/sys/kernel/btf/vmlinux") - if err == nil { - defer fh.Close() - - return loadNakedSpec(fh, internal.NativeEndian, nil, nil) - } - - // use same list of locations as libbpf - // https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122 - locations := []string{ - "/boot/vmlinux-%s", - "/lib/modules/%s/vmlinux-%[1]s", - "/lib/modules/%s/build/vmlinux", - "/usr/lib/modules/%s/kernel/vmlinux", - "/usr/lib/debug/boot/vmlinux-%s", - "/usr/lib/debug/boot/vmlinux-%s.debug", - "/usr/lib/debug/lib/modules/%s/vmlinux", - } - - for _, loc := range locations { - path := fmt.Sprintf(loc, release) - - fh, err := os.Open(path) - if err != nil { - continue - } - defer fh.Close() - - return loadSpecFromVmlinux(fh) - } - - return nil, fmt.Errorf("no BTF for kernel version %s: %w", release, internal.ErrNotSupported) -} - -func parseBTF(btf io.ReadSeeker, bo binary.ByteOrder) ([]rawType, stringTable, error) { - rawBTF, err := ioutil.ReadAll(btf) - if err != nil { - return nil, nil, fmt.Errorf("can't read BTF: %v", err) - } - - rd := bytes.NewReader(rawBTF) - - var header btfHeader - if err := binary.Read(rd, bo, &header); err != nil { - return nil, nil, fmt.Errorf("can't read header: %v", err) - } - - if header.Magic != btfMagic { - return nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic) - } - - if header.Version != 1 { - return nil, nil, fmt.Errorf("unexpected version %v", header.Version) - } - - if header.Flags != 0 { - return nil, nil, fmt.Errorf("unsupported flags %v", header.Flags) - } - - remainder := int64(header.HdrLen) - int64(binary.Size(&header)) - if remainder < 0 { - return nil, nil, errors.New("header is too short") - } - - if _, err := io.CopyN(internal.DiscardZeroes{}, rd, remainder); err != nil { - return nil, nil, fmt.Errorf("header padding: %v", err) - } - - if _, err := rd.Seek(int64(header.HdrLen+header.StringOff), io.SeekStart); err != nil { - return nil, nil, fmt.Errorf("can't seek to start of string section: %v", err) - } - - rawStrings, err := readStringTable(io.LimitReader(rd, int64(header.StringLen))) - if err != nil { - return nil, nil, fmt.Errorf("can't read type names: %w", err) - } - - if _, err := rd.Seek(int64(header.HdrLen+header.TypeOff), io.SeekStart); err != nil { - return nil, nil, fmt.Errorf("can't seek to start of type section: %v", err) - } - - rawTypes, err := readTypes(io.LimitReader(rd, int64(header.TypeLen)), bo) - if err != nil { - return nil, nil, fmt.Errorf("can't read types: %w", err) - } - - return rawTypes, rawStrings, nil -} - -type variable struct { - section string - name string -} - -func fixupDatasec(rawTypes []rawType, rawStrings stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error { - for i, rawType := range rawTypes { - if rawType.Kind() != kindDatasec { - continue - } - - name, err := rawStrings.Lookup(rawType.NameOff) - if err != nil { - return err - } - - if name == ".kconfig" || name == ".ksyms" { - return fmt.Errorf("reference to %s: %w", name, ErrNotSupported) - } - - if rawTypes[i].SizeType != 0 { - continue - } - - size, ok 
:= sectionSizes[name] - if !ok { - return fmt.Errorf("data section %s: missing size", name) - } - - rawTypes[i].SizeType = size - - secinfos := rawType.data.([]btfVarSecinfo) - for j, secInfo := range secinfos { - id := int(secInfo.Type - 1) - if id >= len(rawTypes) { - return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j) - } - - varName, err := rawStrings.Lookup(rawTypes[id].NameOff) - if err != nil { - return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err) - } - - offset, ok := variableOffsets[variable{name, varName}] - if !ok { - return fmt.Errorf("data section %s: missing offset for variable %s", name, varName) - } - - secinfos[j].Offset = offset - } - } - - return nil -} - -type marshalOpts struct { - ByteOrder binary.ByteOrder - StripFuncLinkage bool -} - -func (s *Spec) marshal(opts marshalOpts) ([]byte, error) { - var ( - buf bytes.Buffer - header = new(btfHeader) - headerLen = binary.Size(header) - ) - - // Reserve space for the header. We have to write it last since - // we don't know the size of the type section yet. - _, _ = buf.Write(make([]byte, headerLen)) - - // Write type section, just after the header. - for _, raw := range s.rawTypes { - switch { - case opts.StripFuncLinkage && raw.Kind() == kindFunc: - raw.SetLinkage(StaticFunc) - } - - if err := raw.Marshal(&buf, opts.ByteOrder); err != nil { - return nil, fmt.Errorf("can't marshal BTF: %w", err) - } - } - - typeLen := uint32(buf.Len() - headerLen) - - // Write string section after type section. - _, _ = buf.Write(s.strings) - - // Fill out the header, and write it out. - header = &btfHeader{ - Magic: btfMagic, - Version: 1, - Flags: 0, - HdrLen: uint32(headerLen), - TypeOff: 0, - TypeLen: typeLen, - StringOff: typeLen, - StringLen: uint32(len(s.strings)), - } - - raw := buf.Bytes() - err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header) - if err != nil { - return nil, fmt.Errorf("can't write header: %v", err) - } - - return raw, nil -} - -type sliceWriter []byte - -func (sw sliceWriter) Write(p []byte) (int, error) { - if len(p) != len(sw) { - return 0, errors.New("size doesn't match") - } - - return copy(sw, p), nil -} - -// Program finds the BTF for a specific section. -// -// Length is the number of bytes in the raw BPF instruction stream. -// -// Returns an error which may wrap ErrNoExtendedInfo if the Spec doesn't -// contain extended BTF info. -func (s *Spec) Program(name string, length uint64) (*Program, error) { - if length == 0 { - return nil, errors.New("length musn't be zero") - } - - if s.funcInfos == nil && s.lineInfos == nil && s.coreRelos == nil { - return nil, fmt.Errorf("BTF for section %s: %w", name, ErrNoExtendedInfo) - } - - funcInfos, funcOK := s.funcInfos[name] - lineInfos, lineOK := s.lineInfos[name] - relos, coreOK := s.coreRelos[name] - - if !funcOK && !lineOK && !coreOK { - return nil, fmt.Errorf("no extended BTF info for section %s", name) - } - - return &Program{s, length, funcInfos, lineInfos, relos}, nil -} - -// Datasec returns the BTF required to create maps which represent data sections. -func (s *Spec) Datasec(name string) (*Map, error) { - var datasec Datasec - if err := s.FindType(name, &datasec); err != nil { - return nil, fmt.Errorf("data section %s: can't get BTF: %w", name, err) - } - - m := NewMap(s, &Void{}, &datasec) - return &m, nil -} - -// FindType searches for a type with a specific name. -// -// hint determines the type of the returned Type. 
-// -// Returns an error wrapping ErrNotFound if no matching -// type exists in spec. -func (s *Spec) FindType(name string, typ Type) error { - var ( - wanted = reflect.TypeOf(typ) - candidate Type - ) - - for _, typ := range s.namedTypes[essentialName(name)] { - if reflect.TypeOf(typ) != wanted { - continue - } - - // Match against the full name, not just the essential one. - if typ.name() != name { - continue - } - - if candidate != nil { - return fmt.Errorf("type %s: multiple candidates for %T", name, typ) - } - - candidate = typ - } - - if candidate == nil { - return fmt.Errorf("type %s: %w", name, ErrNotFound) - } - - cpy, _ := copyType(candidate, nil) - value := reflect.Indirect(reflect.ValueOf(cpy)) - reflect.Indirect(reflect.ValueOf(typ)).Set(value) - return nil -} - -// Handle is a reference to BTF loaded into the kernel. -type Handle struct { - fd *internal.FD -} - -// NewHandle loads BTF into the kernel. -// -// Returns ErrNotSupported if BTF is not supported. -func NewHandle(spec *Spec) (*Handle, error) { - if err := haveBTF(); err != nil { - return nil, err - } - - if spec.byteOrder != internal.NativeEndian { - return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian) - } - - btf, err := spec.marshal(marshalOpts{ - ByteOrder: internal.NativeEndian, - StripFuncLinkage: haveFuncLinkage() != nil, - }) - if err != nil { - return nil, fmt.Errorf("can't marshal BTF: %w", err) - } - - if uint64(len(btf)) > math.MaxUint32 { - return nil, errors.New("BTF exceeds the maximum size") - } - - attr := &bpfLoadBTFAttr{ - btf: internal.NewSlicePointer(btf), - btfSize: uint32(len(btf)), - } - - fd, err := bpfLoadBTF(attr) - if err != nil { - logBuf := make([]byte, 64*1024) - attr.logBuf = internal.NewSlicePointer(logBuf) - attr.btfLogSize = uint32(len(logBuf)) - attr.btfLogLevel = 1 - _, logErr := bpfLoadBTF(attr) - return nil, internal.ErrorWithLog(err, logBuf, logErr) - } - - return &Handle{fd}, nil -} - -// Close destroys the handle. -// -// Subsequent calls to FD will return an invalid value. -func (h *Handle) Close() error { - return h.fd.Close() -} - -// FD returns the file descriptor for the handle. -func (h *Handle) FD() int { - value, err := h.fd.Value() - if err != nil { - return -1 - } - - return int(value) -} - -// Map is the BTF for a map. -type Map struct { - spec *Spec - key, value Type -} - -// NewMap returns a new Map containing the given values. -// The key and value arguments are initialized to Void if nil values are given. -func NewMap(spec *Spec, key Type, value Type) Map { - if key == nil { - key = &Void{} - } - if value == nil { - value = &Void{} - } - - return Map{ - spec: spec, - key: key, - value: value, - } -} - -// MapSpec should be a method on Map, but is a free function -// to hide it from users of the ebpf package. -func MapSpec(m *Map) *Spec { - return m.spec -} - -// MapKey should be a method on Map, but is a free function -// to hide it from users of the ebpf package. -func MapKey(m *Map) Type { - return m.key -} - -// MapValue should be a method on Map, but is a free function -// to hide it from users of the ebpf package. -func MapValue(m *Map) Type { - return m.value -} - -// Program is the BTF information for a stream of instructions. -type Program struct { - spec *Spec - length uint64 - funcInfos, lineInfos extInfo - coreRelos coreRelos -} - -// ProgramSpec returns the Spec needed for loading function and line infos into the kernel. -// -// This is a free function instead of a method to hide it from users -// of package ebpf. 
-func ProgramSpec(s *Program) *Spec { - return s.spec -} - -// ProgramAppend the information from other to the Program. -// -// This is a free function instead of a method to hide it from users -// of package ebpf. -func ProgramAppend(s, other *Program) error { - funcInfos, err := s.funcInfos.append(other.funcInfos, s.length) - if err != nil { - return fmt.Errorf("func infos: %w", err) - } - - lineInfos, err := s.lineInfos.append(other.lineInfos, s.length) - if err != nil { - return fmt.Errorf("line infos: %w", err) - } - - s.funcInfos = funcInfos - s.lineInfos = lineInfos - s.coreRelos = s.coreRelos.append(other.coreRelos, s.length) - s.length += other.length - return nil -} - -// ProgramFuncInfos returns the binary form of BTF function infos. -// -// This is a free function instead of a method to hide it from users -// of package ebpf. -func ProgramFuncInfos(s *Program) (recordSize uint32, bytes []byte, err error) { - bytes, err = s.funcInfos.MarshalBinary() - if err != nil { - return 0, nil, err - } - - return s.funcInfos.recordSize, bytes, nil -} - -// ProgramLineInfos returns the binary form of BTF line infos. -// -// This is a free function instead of a method to hide it from users -// of package ebpf. -func ProgramLineInfos(s *Program) (recordSize uint32, bytes []byte, err error) { - bytes, err = s.lineInfos.MarshalBinary() - if err != nil { - return 0, nil, err - } - - return s.lineInfos.recordSize, bytes, nil -} - -// ProgramFixups returns the changes required to adjust the program to the target. -// -// This is a free function instead of a method to hide it from users -// of package ebpf. -func ProgramFixups(s *Program, target *Spec) (COREFixups, error) { - if len(s.coreRelos) == 0 { - return nil, nil - } - - if target == nil { - var err error - target, err = LoadKernelSpec() - if err != nil { - return nil, err - } - } - - return coreRelocate(s.spec, target, s.coreRelos) -} - -type bpfLoadBTFAttr struct { - btf internal.Pointer - logBuf internal.Pointer - btfSize uint32 - btfLogSize uint32 - btfLogLevel uint32 -} - -func bpfLoadBTF(attr *bpfLoadBTFAttr) (*internal.FD, error) { - fd, err := internal.BPF(internal.BPF_BTF_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err != nil { - return nil, err - } - - return internal.NewFD(uint32(fd)), nil -} - -func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte { - const minHeaderLength = 24 - - typesLen := uint32(binary.Size(types)) - header := btfHeader{ - Magic: btfMagic, - Version: 1, - HdrLen: minHeaderLength, - TypeOff: 0, - TypeLen: typesLen, - StringOff: typesLen, - StringLen: uint32(len(strings)), - } - - buf := new(bytes.Buffer) - _ = binary.Write(buf, bo, &header) - _ = binary.Write(buf, bo, types) - buf.Write(strings) - - return buf.Bytes() -} - -var haveBTF = internal.FeatureTest("BTF", "5.1", func() error { - var ( - types struct { - Integer btfType - Var btfType - btfVar struct{ Linkage uint32 } - } - strings = []byte{0, 'a', 0} - ) - - // We use a BTF_KIND_VAR here, to make sure that - // the kernel understands BTF at least as well as we - // do. BTF_KIND_VAR was introduced ~5.1. 
- types.Integer.SetKind(kindPointer) - types.Var.NameOff = 1 - types.Var.SetKind(kindVar) - types.Var.SizeType = 1 - - btf := marshalBTF(&types, strings, internal.NativeEndian) - - fd, err := bpfLoadBTF(&bpfLoadBTFAttr{ - btf: internal.NewSlicePointer(btf), - btfSize: uint32(len(btf)), - }) - if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { - // Treat both EINVAL and EPERM as not supported: loading the program - // might still succeed without BTF. - return internal.ErrNotSupported - } - if err != nil { - return err - } - - fd.Close() - return nil -}) - -var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() error { - if err := haveBTF(); err != nil { - return err - } - - var ( - types struct { - FuncProto btfType - Func btfType - } - strings = []byte{0, 'a', 0} - ) - - types.FuncProto.SetKind(kindFuncProto) - types.Func.SetKind(kindFunc) - types.Func.SizeType = 1 // aka FuncProto - types.Func.NameOff = 1 - types.Func.SetLinkage(GlobalFunc) - - btf := marshalBTF(&types, strings, internal.NativeEndian) - - fd, err := bpfLoadBTF(&bpfLoadBTFAttr{ - btf: internal.NewSlicePointer(btf), - btfSize: uint32(len(btf)), - }) - if errors.Is(err, unix.EINVAL) { - return internal.ErrNotSupported - } - if err != nil { - return err - } - - fd.Close() - return nil -}) diff --git a/agent/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go b/agent/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go deleted file mode 100644 index beba1bce698..00000000000 --- a/agent/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go +++ /dev/null @@ -1,303 +0,0 @@ -package btf - -import ( - "bufio" - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - - "github.com/cilium/ebpf/asm" - "github.com/cilium/ebpf/internal" -) - -type btfExtHeader struct { - Magic uint16 - Version uint8 - Flags uint8 - HdrLen uint32 - - FuncInfoOff uint32 - FuncInfoLen uint32 - LineInfoOff uint32 - LineInfoLen uint32 -} - -type btfExtCoreHeader struct { - CoreReloOff uint32 - CoreReloLen uint32 -} - -func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (funcInfo, lineInfo map[string]extInfo, relos map[string]coreRelos, err error) { - var header btfExtHeader - var coreHeader btfExtCoreHeader - if err := binary.Read(r, bo, &header); err != nil { - return nil, nil, nil, fmt.Errorf("can't read header: %v", err) - } - - if header.Magic != btfMagic { - return nil, nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic) - } - - if header.Version != 1 { - return nil, nil, nil, fmt.Errorf("unexpected version %v", header.Version) - } - - if header.Flags != 0 { - return nil, nil, nil, fmt.Errorf("unsupported flags %v", header.Flags) - } - - remainder := int64(header.HdrLen) - int64(binary.Size(&header)) - if remainder < 0 { - return nil, nil, nil, errors.New("header is too short") - } - - coreHdrSize := int64(binary.Size(&coreHeader)) - if remainder >= coreHdrSize { - if err := binary.Read(r, bo, &coreHeader); err != nil { - return nil, nil, nil, fmt.Errorf("can't read CO-RE relocation header: %v", err) - } - remainder -= coreHdrSize - } - - // Of course, the .BTF.ext header has different semantics than the - // .BTF ext header. We need to ignore non-null values. 
- _, err = io.CopyN(ioutil.Discard, r, remainder) - if err != nil { - return nil, nil, nil, fmt.Errorf("header padding: %v", err) - } - - if _, err := r.Seek(int64(header.HdrLen+header.FuncInfoOff), io.SeekStart); err != nil { - return nil, nil, nil, fmt.Errorf("can't seek to function info section: %v", err) - } - - buf := bufio.NewReader(io.LimitReader(r, int64(header.FuncInfoLen))) - funcInfo, err = parseExtInfo(buf, bo, strings) - if err != nil { - return nil, nil, nil, fmt.Errorf("function info: %w", err) - } - - if _, err := r.Seek(int64(header.HdrLen+header.LineInfoOff), io.SeekStart); err != nil { - return nil, nil, nil, fmt.Errorf("can't seek to line info section: %v", err) - } - - buf = bufio.NewReader(io.LimitReader(r, int64(header.LineInfoLen))) - lineInfo, err = parseExtInfo(buf, bo, strings) - if err != nil { - return nil, nil, nil, fmt.Errorf("line info: %w", err) - } - - if coreHeader.CoreReloOff > 0 && coreHeader.CoreReloLen > 0 { - if _, err := r.Seek(int64(header.HdrLen+coreHeader.CoreReloOff), io.SeekStart); err != nil { - return nil, nil, nil, fmt.Errorf("can't seek to CO-RE relocation section: %v", err) - } - - relos, err = parseExtInfoRelos(io.LimitReader(r, int64(coreHeader.CoreReloLen)), bo, strings) - if err != nil { - return nil, nil, nil, fmt.Errorf("CO-RE relocation info: %w", err) - } - } - - return funcInfo, lineInfo, relos, nil -} - -type btfExtInfoSec struct { - SecNameOff uint32 - NumInfo uint32 -} - -type extInfoRecord struct { - InsnOff uint64 - Opaque []byte -} - -type extInfo struct { - recordSize uint32 - records []extInfoRecord -} - -func (ei extInfo) append(other extInfo, offset uint64) (extInfo, error) { - if other.recordSize != ei.recordSize { - return extInfo{}, fmt.Errorf("ext_info record size mismatch, want %d (got %d)", ei.recordSize, other.recordSize) - } - - records := make([]extInfoRecord, 0, len(ei.records)+len(other.records)) - records = append(records, ei.records...) - for _, info := range other.records { - records = append(records, extInfoRecord{ - InsnOff: info.InsnOff + offset, - Opaque: info.Opaque, - }) - } - return extInfo{ei.recordSize, records}, nil -} - -func (ei extInfo) MarshalBinary() ([]byte, error) { - if len(ei.records) == 0 { - return nil, nil - } - - buf := bytes.NewBuffer(make([]byte, 0, int(ei.recordSize)*len(ei.records))) - for _, info := range ei.records { - // The kernel expects offsets in number of raw bpf instructions, - // while the ELF tracks it in bytes. 
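	// Worked example of the conversion below: asm.InstructionSize is 8 bytes,
	// so an ELF byte offset of 24 becomes xlated instruction offset 24/8 = 3.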
- insnOff := uint32(info.InsnOff / asm.InstructionSize) - if err := binary.Write(buf, internal.NativeEndian, insnOff); err != nil { - return nil, fmt.Errorf("can't write instruction offset: %v", err) - } - - buf.Write(info.Opaque) - } - - return buf.Bytes(), nil -} - -func parseExtInfo(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]extInfo, error) { - const maxRecordSize = 256 - - var recordSize uint32 - if err := binary.Read(r, bo, &recordSize); err != nil { - return nil, fmt.Errorf("can't read record size: %v", err) - } - - if recordSize < 4 { - // Need at least insnOff - return nil, errors.New("record size too short") - } - if recordSize > maxRecordSize { - return nil, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize) - } - - result := make(map[string]extInfo) - for { - secName, infoHeader, err := parseExtInfoHeader(r, bo, strings) - if errors.Is(err, io.EOF) { - return result, nil - } - - var records []extInfoRecord - for i := uint32(0); i < infoHeader.NumInfo; i++ { - var byteOff uint32 - if err := binary.Read(r, bo, &byteOff); err != nil { - return nil, fmt.Errorf("section %v: can't read extended info offset: %v", secName, err) - } - - buf := make([]byte, int(recordSize-4)) - if _, err := io.ReadFull(r, buf); err != nil { - return nil, fmt.Errorf("section %v: can't read record: %v", secName, err) - } - - if byteOff%asm.InstructionSize != 0 { - return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, byteOff) - } - - records = append(records, extInfoRecord{uint64(byteOff), buf}) - } - - result[secName] = extInfo{ - recordSize, - records, - } - } -} - -// bpfCoreRelo matches `struct bpf_core_relo` from the kernel -type bpfCoreRelo struct { - InsnOff uint32 - TypeID TypeID - AccessStrOff uint32 - Kind COREKind -} - -type coreRelo struct { - insnOff uint32 - typeID TypeID - accessor coreAccessor - kind COREKind -} - -type coreRelos []coreRelo - -// append two slices of extInfoRelo to each other. The InsnOff of b are adjusted -// by offset. -func (r coreRelos) append(other coreRelos, offset uint64) coreRelos { - result := make([]coreRelo, 0, len(r)+len(other)) - result = append(result, r...) 
- for _, relo := range other { - relo.insnOff += uint32(offset) - result = append(result, relo) - } - return result -} - -var extInfoReloSize = binary.Size(bpfCoreRelo{}) - -func parseExtInfoRelos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]coreRelos, error) { - var recordSize uint32 - if err := binary.Read(r, bo, &recordSize); err != nil { - return nil, fmt.Errorf("read record size: %v", err) - } - - if recordSize != uint32(extInfoReloSize) { - return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize) - } - - result := make(map[string]coreRelos) - for { - secName, infoHeader, err := parseExtInfoHeader(r, bo, strings) - if errors.Is(err, io.EOF) { - return result, nil - } - - var relos coreRelos - for i := uint32(0); i < infoHeader.NumInfo; i++ { - var relo bpfCoreRelo - if err := binary.Read(r, bo, &relo); err != nil { - return nil, fmt.Errorf("section %v: read record: %v", secName, err) - } - - if relo.InsnOff%asm.InstructionSize != 0 { - return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, relo.InsnOff) - } - - accessorStr, err := strings.Lookup(relo.AccessStrOff) - if err != nil { - return nil, err - } - - accessor, err := parseCoreAccessor(accessorStr) - if err != nil { - return nil, fmt.Errorf("accessor %q: %s", accessorStr, err) - } - - relos = append(relos, coreRelo{ - relo.InsnOff, - relo.TypeID, - accessor, - relo.Kind, - }) - } - - result[secName] = relos - } -} - -func parseExtInfoHeader(r io.Reader, bo binary.ByteOrder, strings stringTable) (string, *btfExtInfoSec, error) { - var infoHeader btfExtInfoSec - if err := binary.Read(r, bo, &infoHeader); err != nil { - return "", nil, fmt.Errorf("read ext info header: %w", err) - } - - secName, err := strings.Lookup(infoHeader.SecNameOff) - if err != nil { - return "", nil, fmt.Errorf("get section name: %w", err) - } - - if infoHeader.NumInfo == 0 { - return "", nil, fmt.Errorf("section %s has zero records", secName) - } - - return secName, &infoHeader, nil -} diff --git a/agent/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go b/agent/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go deleted file mode 100644 index 37e043fd378..00000000000 --- a/agent/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go +++ /dev/null @@ -1,49 +0,0 @@ -// +build gofuzz - -// Use with https://github.com/dvyukov/go-fuzz - -package btf - -import ( - "bytes" - "encoding/binary" - - "github.com/cilium/ebpf/internal" -) - -func FuzzSpec(data []byte) int { - if len(data) < binary.Size(btfHeader{}) { - return -1 - } - - spec, err := loadNakedSpec(bytes.NewReader(data), internal.NativeEndian, nil, nil) - if err != nil { - if spec != nil { - panic("spec is not nil") - } - return 0 - } - if spec == nil { - panic("spec is nil") - } - return 1 -} - -func FuzzExtInfo(data []byte) int { - if len(data) < binary.Size(btfExtHeader{}) { - return -1 - } - - table := stringTable("\x00foo\x00barfoo\x00") - info, err := parseExtInfo(bytes.NewReader(data), internal.NativeEndian, table) - if err != nil { - if info != nil { - panic("info is not nil") - } - return 0 - } - if info == nil { - panic("info is nil") - } - return 1 -} diff --git a/agent/vendor/github.com/cilium/ebpf/internal/btf/strings.go b/agent/vendor/github.com/cilium/ebpf/internal/btf/strings.go deleted file mode 100644 index 8782643a043..00000000000 --- a/agent/vendor/github.com/cilium/ebpf/internal/btf/strings.go +++ /dev/null @@ -1,60 +0,0 @@ -package btf - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" 
-) - -type stringTable []byte - -func readStringTable(r io.Reader) (stringTable, error) { - contents, err := ioutil.ReadAll(r) - if err != nil { - return nil, fmt.Errorf("can't read string table: %v", err) - } - - if len(contents) < 1 { - return nil, errors.New("string table is empty") - } - - if contents[0] != '\x00' { - return nil, errors.New("first item in string table is non-empty") - } - - if contents[len(contents)-1] != '\x00' { - return nil, errors.New("string table isn't null terminated") - } - - return stringTable(contents), nil -} - -func (st stringTable) Lookup(offset uint32) (string, error) { - if int64(offset) > int64(^uint(0)>>1) { - return "", fmt.Errorf("offset %d overflows int", offset) - } - - pos := int(offset) - if pos >= len(st) { - return "", fmt.Errorf("offset %d is out of bounds", offset) - } - - if pos > 0 && st[pos-1] != '\x00' { - return "", fmt.Errorf("offset %d isn't start of a string", offset) - } - - str := st[pos:] - end := bytes.IndexByte(str, '\x00') - if end == -1 { - return "", fmt.Errorf("offset %d isn't null terminated", offset) - } - - return string(str[:end]), nil -} - -func (st stringTable) LookupName(offset uint32) (Name, error) { - str, err := st.Lookup(offset) - return Name(str), err -} diff --git a/agent/vendor/github.com/cilium/ebpf/internal/btf/types.go b/agent/vendor/github.com/cilium/ebpf/internal/btf/types.go deleted file mode 100644 index 62aa31bcd78..00000000000 --- a/agent/vendor/github.com/cilium/ebpf/internal/btf/types.go +++ /dev/null @@ -1,893 +0,0 @@ -package btf - -import ( - "fmt" - "math" - "strings" -) - -const maxTypeDepth = 32 - -// TypeID identifies a type in a BTF section. -type TypeID uint32 - -// ID implements part of the Type interface. -func (tid TypeID) ID() TypeID { - return tid -} - -// Type represents a type described by BTF. -type Type interface { - ID() TypeID - - String() string - - // Make a copy of the type, without copying Type members. - copy() Type - - // Enumerate all nested Types. Repeated calls must visit nested - // types in the same order. - walk(*typeDeque) -} - -// namedType is a type with a name. -// -// Most named types simply embed Name. -type namedType interface { - Type - name() string - essentialName() string -} - -// Name identifies a type. -// -// Anonymous types have an empty name. -type Name string - -func (n Name) name() string { - return string(n) -} - -func (n Name) essentialName() string { - return essentialName(string(n)) -} - -// Void is the unit type of BTF. -type Void struct{} - -func (v *Void) ID() TypeID { return 0 } -func (v *Void) String() string { return "void#0" } -func (v *Void) size() uint32 { return 0 } -func (v *Void) copy() Type { return (*Void)(nil) } -func (v *Void) walk(*typeDeque) {} - -type IntEncoding byte - -const ( - Signed IntEncoding = 1 << iota - Char - Bool -) - -// Int is an integer of a given length. -type Int struct { - TypeID - Name - - // The size of the integer in bytes. - Size uint32 - Encoding IntEncoding - // Offset is the starting bit offset. Currently always 0. 
- // See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int - Offset uint32 - Bits byte -} - -var _ namedType = (*Int)(nil) - -func (i *Int) String() string { - var s strings.Builder - - switch { - case i.Encoding&Char != 0: - s.WriteString("char") - case i.Encoding&Bool != 0: - s.WriteString("bool") - default: - if i.Encoding&Signed == 0 { - s.WriteRune('u') - } - s.WriteString("int") - fmt.Fprintf(&s, "%d", i.Size*8) - } - - fmt.Fprintf(&s, "#%d", i.TypeID) - - if i.Bits > 0 { - fmt.Fprintf(&s, "[bits=%d]", i.Bits) - } - - return s.String() -} - -func (i *Int) size() uint32 { return i.Size } -func (i *Int) walk(*typeDeque) {} -func (i *Int) copy() Type { - cpy := *i - return &cpy -} - -func (i *Int) isBitfield() bool { - return i.Offset > 0 -} - -// Pointer is a pointer to another type. -type Pointer struct { - TypeID - Target Type -} - -func (p *Pointer) String() string { - return fmt.Sprintf("pointer#%d[target=#%d]", p.TypeID, p.Target.ID()) -} - -func (p *Pointer) size() uint32 { return 8 } -func (p *Pointer) walk(tdq *typeDeque) { tdq.push(&p.Target) } -func (p *Pointer) copy() Type { - cpy := *p - return &cpy -} - -// Array is an array with a fixed number of elements. -type Array struct { - TypeID - Type Type - Nelems uint32 -} - -func (arr *Array) String() string { - return fmt.Sprintf("array#%d[type=#%d n=%d]", arr.TypeID, arr.Type.ID(), arr.Nelems) -} - -func (arr *Array) walk(tdq *typeDeque) { tdq.push(&arr.Type) } -func (arr *Array) copy() Type { - cpy := *arr - return &cpy -} - -// Struct is a compound type of consecutive members. -type Struct struct { - TypeID - Name - // The size of the struct including padding, in bytes - Size uint32 - Members []Member -} - -func (s *Struct) String() string { - return fmt.Sprintf("struct#%d[%q]", s.TypeID, s.Name) -} - -func (s *Struct) size() uint32 { return s.Size } - -func (s *Struct) walk(tdq *typeDeque) { - for i := range s.Members { - tdq.push(&s.Members[i].Type) - } -} - -func (s *Struct) copy() Type { - cpy := *s - cpy.Members = copyMembers(s.Members) - return &cpy -} - -func (s *Struct) members() []Member { - return s.Members -} - -// Union is a compound type where members occupy the same memory. -type Union struct { - TypeID - Name - // The size of the union including padding, in bytes. - Size uint32 - Members []Member -} - -func (u *Union) String() string { - return fmt.Sprintf("union#%d[%q]", u.TypeID, u.Name) -} - -func (u *Union) size() uint32 { return u.Size } - -func (u *Union) walk(tdq *typeDeque) { - for i := range u.Members { - tdq.push(&u.Members[i].Type) - } -} - -func (u *Union) copy() Type { - cpy := *u - cpy.Members = copyMembers(u.Members) - return &cpy -} - -func (u *Union) members() []Member { - return u.Members -} - -func copyMembers(orig []Member) []Member { - cpy := make([]Member, len(orig)) - copy(cpy, orig) - return cpy -} - -type composite interface { - members() []Member -} - -var ( - _ composite = (*Struct)(nil) - _ composite = (*Union)(nil) -) - -// Member is part of a Struct or Union. -// -// It is not a valid Type. -type Member struct { - Name - Type Type - // Offset is the bit offset of this member - Offset uint32 - BitfieldSize uint32 -} - -// Enum lists possible values. 
-type Enum struct { - TypeID - Name - Values []EnumValue -} - -func (e *Enum) String() string { - return fmt.Sprintf("enum#%d[%q]", e.TypeID, e.Name) -} - -// EnumValue is part of an Enum -// -// Is is not a valid Type -type EnumValue struct { - Name - Value int32 -} - -func (e *Enum) size() uint32 { return 4 } -func (e *Enum) walk(*typeDeque) {} -func (e *Enum) copy() Type { - cpy := *e - cpy.Values = make([]EnumValue, len(e.Values)) - copy(cpy.Values, e.Values) - return &cpy -} - -// FwdKind is the type of forward declaration. -type FwdKind int - -// Valid types of forward declaration. -const ( - FwdStruct FwdKind = iota - FwdUnion -) - -func (fk FwdKind) String() string { - switch fk { - case FwdStruct: - return "struct" - case FwdUnion: - return "union" - default: - return fmt.Sprintf("%T(%d)", fk, int(fk)) - } -} - -// Fwd is a forward declaration of a Type. -type Fwd struct { - TypeID - Name - Kind FwdKind -} - -func (f *Fwd) String() string { - return fmt.Sprintf("fwd#%d[%s %q]", f.TypeID, f.Kind, f.Name) -} - -func (f *Fwd) walk(*typeDeque) {} -func (f *Fwd) copy() Type { - cpy := *f - return &cpy -} - -// Typedef is an alias of a Type. -type Typedef struct { - TypeID - Name - Type Type -} - -func (td *Typedef) String() string { - return fmt.Sprintf("typedef#%d[%q #%d]", td.TypeID, td.Name, td.Type.ID()) -} - -func (td *Typedef) walk(tdq *typeDeque) { tdq.push(&td.Type) } -func (td *Typedef) copy() Type { - cpy := *td - return &cpy -} - -// Volatile is a qualifier. -type Volatile struct { - TypeID - Type Type -} - -func (v *Volatile) String() string { - return fmt.Sprintf("volatile#%d[#%d]", v.TypeID, v.Type.ID()) -} - -func (v *Volatile) qualify() Type { return v.Type } -func (v *Volatile) walk(tdq *typeDeque) { tdq.push(&v.Type) } -func (v *Volatile) copy() Type { - cpy := *v - return &cpy -} - -// Const is a qualifier. -type Const struct { - TypeID - Type Type -} - -func (c *Const) String() string { - return fmt.Sprintf("const#%d[#%d]", c.TypeID, c.Type.ID()) -} - -func (c *Const) qualify() Type { return c.Type } -func (c *Const) walk(tdq *typeDeque) { tdq.push(&c.Type) } -func (c *Const) copy() Type { - cpy := *c - return &cpy -} - -// Restrict is a qualifier. -type Restrict struct { - TypeID - Type Type -} - -func (r *Restrict) String() string { - return fmt.Sprintf("restrict#%d[#%d]", r.TypeID, r.Type.ID()) -} - -func (r *Restrict) qualify() Type { return r.Type } -func (r *Restrict) walk(tdq *typeDeque) { tdq.push(&r.Type) } -func (r *Restrict) copy() Type { - cpy := *r - return &cpy -} - -// Func is a function definition. -type Func struct { - TypeID - Name - Type Type - Linkage FuncLinkage -} - -func (f *Func) String() string { - return fmt.Sprintf("func#%d[%s %q proto=#%d]", f.TypeID, f.Linkage, f.Name, f.Type.ID()) -} - -func (f *Func) walk(tdq *typeDeque) { tdq.push(&f.Type) } -func (f *Func) copy() Type { - cpy := *f - return &cpy -} - -// FuncProto is a function declaration. 
-type FuncProto struct { - TypeID - Return Type - Params []FuncParam -} - -func (fp *FuncProto) String() string { - var s strings.Builder - fmt.Fprintf(&s, "proto#%d[", fp.TypeID) - for _, param := range fp.Params { - fmt.Fprintf(&s, "%q=#%d, ", param.Name, param.Type.ID()) - } - fmt.Fprintf(&s, "return=#%d]", fp.Return.ID()) - return s.String() -} - -func (fp *FuncProto) walk(tdq *typeDeque) { - tdq.push(&fp.Return) - for i := range fp.Params { - tdq.push(&fp.Params[i].Type) - } -} - -func (fp *FuncProto) copy() Type { - cpy := *fp - cpy.Params = make([]FuncParam, len(fp.Params)) - copy(cpy.Params, fp.Params) - return &cpy -} - -type FuncParam struct { - Name - Type Type -} - -// Var is a global variable. -type Var struct { - TypeID - Name - Type Type - Linkage VarLinkage -} - -func (v *Var) String() string { - return fmt.Sprintf("var#%d[%s %q]", v.TypeID, v.Linkage, v.Name) -} - -func (v *Var) walk(tdq *typeDeque) { tdq.push(&v.Type) } -func (v *Var) copy() Type { - cpy := *v - return &cpy -} - -// Datasec is a global program section containing data. -type Datasec struct { - TypeID - Name - Size uint32 - Vars []VarSecinfo -} - -func (ds *Datasec) String() string { - return fmt.Sprintf("section#%d[%q]", ds.TypeID, ds.Name) -} - -func (ds *Datasec) size() uint32 { return ds.Size } - -func (ds *Datasec) walk(tdq *typeDeque) { - for i := range ds.Vars { - tdq.push(&ds.Vars[i].Type) - } -} - -func (ds *Datasec) copy() Type { - cpy := *ds - cpy.Vars = make([]VarSecinfo, len(ds.Vars)) - copy(cpy.Vars, ds.Vars) - return &cpy -} - -// VarSecinfo describes variable in a Datasec -// -// It is not a valid Type. -type VarSecinfo struct { - Type Type - Offset uint32 - Size uint32 -} - -type sizer interface { - size() uint32 -} - -var ( - _ sizer = (*Int)(nil) - _ sizer = (*Pointer)(nil) - _ sizer = (*Struct)(nil) - _ sizer = (*Union)(nil) - _ sizer = (*Enum)(nil) - _ sizer = (*Datasec)(nil) -) - -type qualifier interface { - qualify() Type -} - -var ( - _ qualifier = (*Const)(nil) - _ qualifier = (*Restrict)(nil) - _ qualifier = (*Volatile)(nil) -) - -// Sizeof returns the size of a type in bytes. -// -// Returns an error if the size can't be computed. -func Sizeof(typ Type) (int, error) { - var ( - n = int64(1) - elem int64 - ) - - for i := 0; i < maxTypeDepth; i++ { - switch v := typ.(type) { - case *Array: - if n > 0 && int64(v.Nelems) > math.MaxInt64/n { - return 0, fmt.Errorf("type %s: overflow", typ) - } - - // Arrays may be of zero length, which allows - // n to be zero as well. - n *= int64(v.Nelems) - typ = v.Type - continue - - case sizer: - elem = int64(v.size()) - - case *Typedef: - typ = v.Type - continue - - case qualifier: - typ = v.qualify() - continue - - default: - return 0, fmt.Errorf("unsized type %T", typ) - } - - if n > 0 && elem > math.MaxInt64/n { - return 0, fmt.Errorf("type %s: overflow", typ) - } - - size := n * elem - if int64(int(size)) != size { - return 0, fmt.Errorf("type %s: overflow", typ) - } - - return int(size), nil - } - - return 0, fmt.Errorf("type %s: exceeded type depth", typ) -} - -// copy a Type recursively. -// -// typ may form a cycle. -// -// Returns any errors from transform verbatim. -func copyType(typ Type, transform func(Type) (Type, error)) (Type, error) { - var ( - copies = make(map[Type]Type) - work typeDeque - ) - - for t := &typ; t != nil; t = work.pop() { - // *t is the identity of the type. 
- if cpy := copies[*t]; cpy != nil { - *t = cpy - continue - } - - var cpy Type - if transform != nil { - tf, err := transform(*t) - if err != nil { - return nil, fmt.Errorf("copy %s: %w", typ, err) - } - cpy = tf.copy() - } else { - cpy = (*t).copy() - } - - copies[*t] = cpy - *t = cpy - - // Mark any nested types for copying. - cpy.walk(&work) - } - - return typ, nil -} - -// typeDeque keeps track of pointers to types which still -// need to be visited. -type typeDeque struct { - types []*Type - read, write uint64 - mask uint64 -} - -// push adds a type to the stack. -func (dq *typeDeque) push(t *Type) { - if dq.write-dq.read < uint64(len(dq.types)) { - dq.types[dq.write&dq.mask] = t - dq.write++ - return - } - - new := len(dq.types) * 2 - if new == 0 { - new = 8 - } - - types := make([]*Type, new) - pivot := dq.read & dq.mask - n := copy(types, dq.types[pivot:]) - n += copy(types[n:], dq.types[:pivot]) - types[n] = t - - dq.types = types - dq.mask = uint64(new) - 1 - dq.read, dq.write = 0, uint64(n+1) -} - -// shift returns the first element or null. -func (dq *typeDeque) shift() *Type { - if dq.read == dq.write { - return nil - } - - index := dq.read & dq.mask - t := dq.types[index] - dq.types[index] = nil - dq.read++ - return t -} - -// pop returns the last element or null. -func (dq *typeDeque) pop() *Type { - if dq.read == dq.write { - return nil - } - - dq.write-- - index := dq.write & dq.mask - t := dq.types[index] - dq.types[index] = nil - return t -} - -// all returns all elements. -// -// The deque is empty after calling this method. -func (dq *typeDeque) all() []*Type { - length := dq.write - dq.read - types := make([]*Type, 0, length) - for t := dq.shift(); t != nil; t = dq.shift() { - types = append(types, t) - } - return types -} - -// inflateRawTypes takes a list of raw btf types linked via type IDs, and turns -// it into a graph of Types connected via pointers. -// -// Returns a map of named types (so, where NameOff is non-zero) and a slice of types -// indexed by TypeID. Since BTF ignores compilation units, multiple types may share -// the same name. A Type may form a cyclic graph by pointing at itself. -func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, namedTypes map[string][]namedType, err error) { - type fixupDef struct { - id TypeID - expectedKind btfKind - typ *Type - } - - var fixups []fixupDef - fixup := func(id TypeID, expectedKind btfKind, typ *Type) { - fixups = append(fixups, fixupDef{id, expectedKind, typ}) - } - - convertMembers := func(raw []btfMember, kindFlag bool) ([]Member, error) { - // NB: The fixup below relies on pre-allocating this array to - // work, since otherwise append might re-allocate members. - members := make([]Member, 0, len(raw)) - for i, btfMember := range raw { - name, err := rawStrings.LookupName(btfMember.NameOff) - if err != nil { - return nil, fmt.Errorf("can't get name for member %d: %w", i, err) - } - m := Member{ - Name: name, - Offset: btfMember.Offset, - } - if kindFlag { - m.BitfieldSize = btfMember.Offset >> 24 - m.Offset &= 0xffffff - } - members = append(members, m) - } - for i := range members { - fixup(raw[i].Type, kindUnknown, &members[i].Type) - } - return members, nil - } - - types = make([]Type, 0, len(rawTypes)) - types = append(types, (*Void)(nil)) - namedTypes = make(map[string][]namedType) - - for i, raw := range rawTypes { - var ( - // Void is defined to always be type ID 0, and is thus - // omitted from BTF. 
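		// Concretely: rawTypes[0] gets TypeID 1, rawTypes[1] gets TypeID 2,
		// and so on, while the implicit Void prepended above occupies TypeID 0.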
- id = TypeID(i + 1) - typ Type - ) - - name, err := rawStrings.LookupName(raw.NameOff) - if err != nil { - return nil, nil, fmt.Errorf("get name for type id %d: %w", id, err) - } - - switch raw.Kind() { - case kindInt: - encoding, offset, bits := intEncoding(*raw.data.(*uint32)) - typ = &Int{id, name, raw.Size(), encoding, offset, bits} - - case kindPointer: - ptr := &Pointer{id, nil} - fixup(raw.Type(), kindUnknown, &ptr.Target) - typ = ptr - - case kindArray: - btfArr := raw.data.(*btfArray) - - // IndexType is unused according to btf.rst. - // Don't make it available right now. - arr := &Array{id, nil, btfArr.Nelems} - fixup(btfArr.Type, kindUnknown, &arr.Type) - typ = arr - - case kindStruct: - members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag()) - if err != nil { - return nil, nil, fmt.Errorf("struct %s (id %d): %w", name, id, err) - } - typ = &Struct{id, name, raw.Size(), members} - - case kindUnion: - members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag()) - if err != nil { - return nil, nil, fmt.Errorf("union %s (id %d): %w", name, id, err) - } - typ = &Union{id, name, raw.Size(), members} - - case kindEnum: - rawvals := raw.data.([]btfEnum) - vals := make([]EnumValue, 0, len(rawvals)) - for i, btfVal := range rawvals { - name, err := rawStrings.LookupName(btfVal.NameOff) - if err != nil { - return nil, nil, fmt.Errorf("get name for enum value %d: %s", i, err) - } - vals = append(vals, EnumValue{ - Name: name, - Value: btfVal.Val, - }) - } - typ = &Enum{id, name, vals} - - case kindForward: - if raw.KindFlag() { - typ = &Fwd{id, name, FwdUnion} - } else { - typ = &Fwd{id, name, FwdStruct} - } - - case kindTypedef: - typedef := &Typedef{id, name, nil} - fixup(raw.Type(), kindUnknown, &typedef.Type) - typ = typedef - - case kindVolatile: - volatile := &Volatile{id, nil} - fixup(raw.Type(), kindUnknown, &volatile.Type) - typ = volatile - - case kindConst: - cnst := &Const{id, nil} - fixup(raw.Type(), kindUnknown, &cnst.Type) - typ = cnst - - case kindRestrict: - restrict := &Restrict{id, nil} - fixup(raw.Type(), kindUnknown, &restrict.Type) - typ = restrict - - case kindFunc: - fn := &Func{id, name, nil, raw.Linkage()} - fixup(raw.Type(), kindFuncProto, &fn.Type) - typ = fn - - case kindFuncProto: - rawparams := raw.data.([]btfParam) - params := make([]FuncParam, 0, len(rawparams)) - for i, param := range rawparams { - name, err := rawStrings.LookupName(param.NameOff) - if err != nil { - return nil, nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err) - } - params = append(params, FuncParam{ - Name: name, - }) - } - for i := range params { - fixup(rawparams[i].Type, kindUnknown, ¶ms[i].Type) - } - - fp := &FuncProto{id, nil, params} - fixup(raw.Type(), kindUnknown, &fp.Return) - typ = fp - - case kindVar: - variable := raw.data.(*btfVariable) - v := &Var{id, name, nil, VarLinkage(variable.Linkage)} - fixup(raw.Type(), kindUnknown, &v.Type) - typ = v - - case kindDatasec: - btfVars := raw.data.([]btfVarSecinfo) - vars := make([]VarSecinfo, 0, len(btfVars)) - for _, btfVar := range btfVars { - vars = append(vars, VarSecinfo{ - Offset: btfVar.Offset, - Size: btfVar.Size, - }) - } - for i := range vars { - fixup(btfVars[i].Type, kindVar, &vars[i].Type) - } - typ = &Datasec{id, name, raw.SizeType, vars} - - default: - return nil, nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind()) - } - - types = append(types, typ) - - if named, ok := typ.(namedType); ok { - if name := essentialName(named.name()); name != "" { - namedTypes[name] 
= append(namedTypes[name], named) - } - } - } - - for _, fixup := range fixups { - i := int(fixup.id) - if i >= len(types) { - return nil, nil, fmt.Errorf("reference to invalid type id: %d", fixup.id) - } - - // Default void (id 0) to unknown - rawKind := kindUnknown - if i > 0 { - rawKind = rawTypes[i-1].Kind() - } - - if expected := fixup.expectedKind; expected != kindUnknown && rawKind != expected { - return nil, nil, fmt.Errorf("expected type id %d to have kind %s, found %s", fixup.id, expected, rawKind) - } - - *fixup.typ = types[i] - } - - return types, namedTypes, nil -} - -// essentialName returns name without a ___ suffix. -func essentialName(name string) string { - lastIdx := strings.LastIndex(name, "___") - if lastIdx > 0 { - return name[:lastIdx] - } - return name -} diff --git a/agent/vendor/github.com/cilium/ebpf/internal/cpu.go b/agent/vendor/github.com/cilium/ebpf/internal/cpu.go index d3424ba4345..3affa1efb9d 100644 --- a/agent/vendor/github.com/cilium/ebpf/internal/cpu.go +++ b/agent/vendor/github.com/cilium/ebpf/internal/cpu.go @@ -2,7 +2,7 @@ package internal import ( "fmt" - "io/ioutil" + "os" "strings" "sync" ) @@ -24,7 +24,7 @@ func PossibleCPUs() (int, error) { } func parseCPUsFromFile(path string) (int, error) { - spec, err := ioutil.ReadFile(path) + spec, err := os.ReadFile(path) if err != nil { return 0, err } diff --git a/agent/vendor/github.com/cilium/ebpf/internal/elf.go b/agent/vendor/github.com/cilium/ebpf/internal/elf.go index 54a4313130a..011581938d9 100644 --- a/agent/vendor/github.com/cilium/ebpf/internal/elf.go +++ b/agent/vendor/github.com/cilium/ebpf/internal/elf.go @@ -35,6 +35,29 @@ func NewSafeELFFile(r io.ReaderAt) (safe *SafeELFFile, err error) { return &SafeELFFile{file}, nil } +// OpenSafeELFFile reads an ELF from a file. +// +// It works like NewSafeELFFile, with the exception that safe.Close will +// close the underlying file. +func OpenSafeELFFile(path string) (safe *SafeELFFile, err error) { + defer func() { + r := recover() + if r == nil { + return + } + + safe = nil + err = fmt.Errorf("reading ELF file panicked: %s", r) + }() + + file, err := elf.Open(path) + if err != nil { + return nil, err + } + + return &SafeELFFile{file}, nil +} + // Symbols is the safe version of elf.File.Symbols. func (se *SafeELFFile) Symbols() (syms []elf.Symbol, err error) { defer func() { @@ -66,3 +89,14 @@ func (se *SafeELFFile) DynamicSymbols() (syms []elf.Symbol, err error) { syms, err = se.File.DynamicSymbols() return } + +// SectionsByType returns all sections in the file with the specified section type. +func (se *SafeELFFile) SectionsByType(typ elf.SectionType) []*elf.Section { + sections := make([]*elf.Section, 0, 1) + for _, section := range se.Sections { + if section.Type == typ { + sections = append(sections, section) + } + } + return sections +} diff --git a/agent/vendor/github.com/cilium/ebpf/internal/endian.go b/agent/vendor/github.com/cilium/ebpf/internal/endian.go deleted file mode 100644 index 6ae99fcd5f3..00000000000 --- a/agent/vendor/github.com/cilium/ebpf/internal/endian.go +++ /dev/null @@ -1,29 +0,0 @@ -package internal - -import ( - "encoding/binary" - "unsafe" -) - -// NativeEndian is set to either binary.BigEndian or binary.LittleEndian, -// depending on the host's endianness. -var NativeEndian binary.ByteOrder - -// Clang is set to either "el" or "eb" depending on the host's endianness. 
-var ClangEndian string - -func init() { - if isBigEndian() { - NativeEndian = binary.BigEndian - ClangEndian = "eb" - } else { - NativeEndian = binary.LittleEndian - ClangEndian = "el" - } -} - -func isBigEndian() (ret bool) { - i := int(0x1) - bs := (*[int(unsafe.Sizeof(i))]byte)(unsafe.Pointer(&i)) - return bs[0] == 0 -} diff --git a/agent/vendor/github.com/cilium/ebpf/internal/endian_be.go b/agent/vendor/github.com/cilium/ebpf/internal/endian_be.go new file mode 100644 index 00000000000..ad33cda8511 --- /dev/null +++ b/agent/vendor/github.com/cilium/ebpf/internal/endian_be.go @@ -0,0 +1,13 @@ +//go:build armbe || arm64be || mips || mips64 || mips64p32 || ppc64 || s390 || s390x || sparc || sparc64 +// +build armbe arm64be mips mips64 mips64p32 ppc64 s390 s390x sparc sparc64 + +package internal + +import "encoding/binary" + +// NativeEndian is set to either binary.BigEndian or binary.LittleEndian, +// depending on the host's endianness. +var NativeEndian binary.ByteOrder = binary.BigEndian + +// ClangEndian is set to either "el" or "eb" depending on the host's endianness. +const ClangEndian = "eb" diff --git a/agent/vendor/github.com/cilium/ebpf/internal/endian_le.go b/agent/vendor/github.com/cilium/ebpf/internal/endian_le.go new file mode 100644 index 00000000000..41a68224c83 --- /dev/null +++ b/agent/vendor/github.com/cilium/ebpf/internal/endian_le.go @@ -0,0 +1,13 @@ +//go:build 386 || amd64 || amd64p32 || arm || arm64 || mipsle || mips64le || mips64p32le || ppc64le || riscv64 +// +build 386 amd64 amd64p32 arm arm64 mipsle mips64le mips64p32le ppc64le riscv64 + +package internal + +import "encoding/binary" + +// NativeEndian is set to either binary.BigEndian or binary.LittleEndian, +// depending on the host's endianness. +var NativeEndian binary.ByteOrder = binary.LittleEndian + +// ClangEndian is set to either "el" or "eb" depending on the host's endianness. +const ClangEndian = "el" diff --git a/agent/vendor/github.com/cilium/ebpf/internal/errors.go b/agent/vendor/github.com/cilium/ebpf/internal/errors.go index 877bd72ee26..b5ccdd7d053 100644 --- a/agent/vendor/github.com/cilium/ebpf/internal/errors.go +++ b/agent/vendor/github.com/cilium/ebpf/internal/errors.go @@ -2,50 +2,205 @@ package internal import ( "bytes" - "errors" "fmt" + "io" "strings" - - "github.com/cilium/ebpf/internal/unix" ) -// ErrorWithLog returns an error that includes logs from the -// kernel verifier. +// ErrorWithLog returns an error which includes logs from the kernel verifier. +// +// The default error output is a summary of the full log. The latter can be +// accessed via VerifierError.Log or by formatting the error, see Format. // -// logErr should be the error returned by the syscall that generated -// the log. It is used to check for truncation of the output. -func ErrorWithLog(err error, log []byte, logErr error) error { - logStr := strings.Trim(CString(log), "\t\r\n ") - if errors.Is(logErr, unix.ENOSPC) { - logStr += " (truncated...)" +// A set of heuristics is used to determine whether the log has been truncated. +func ErrorWithLog(err error, log []byte) *VerifierError { + const whitespace = "\t\r\v\n " + + // Convert verifier log C string by truncating it on the first 0 byte + // and trimming trailing whitespace before interpreting as a Go string. + truncated := false + if i := bytes.IndexByte(log, 0); i != -1 { + if i == len(log)-1 && !bytes.HasSuffix(log[:i], []byte{'\n'}) { + // The null byte is at the end of the buffer and it's not preceded + // by a newline character. 
Most likely the buffer was too short. + truncated = true + } + + log = log[:i] + } else if len(log) > 0 { + // No null byte? Dodgy! + truncated = true + } + + log = bytes.Trim(log, whitespace) + logLines := bytes.Split(log, []byte{'\n'}) + lines := make([]string, 0, len(logLines)) + for _, line := range logLines { + // Don't remove leading white space on individual lines. We rely on it + // when outputting logs. + lines = append(lines, string(bytes.TrimRight(line, whitespace))) } - return &VerifierError{err, logStr} + return &VerifierError{err, lines, truncated} } // VerifierError includes information from the eBPF verifier. +// +// It summarises the log output, see Format if you want to output the full contents. type VerifierError struct { - cause error - log string + // The error which caused this error. + Cause error + // The verifier output split into lines. + Log []string + // Whether the log output is truncated, based on several heuristics. + Truncated bool } func (le *VerifierError) Unwrap() error { - return le.cause + return le.Cause } func (le *VerifierError) Error() string { - if le.log == "" { - return le.cause.Error() + log := le.Log + if n := len(log); n > 0 && strings.HasPrefix(log[n-1], "processed ") { + // Get rid of "processed 39 insns (limit 1000000) ..." from summary. + log = log[:n-1] + } + + n := len(log) + if n == 0 { + return le.Cause.Error() + } + + lines := log[n-1:] + if n >= 2 && (includePreviousLine(log[n-1]) || le.Truncated) { + // Add one more line of context if it aids understanding the error. + lines = log[n-2:] + } + + var b strings.Builder + fmt.Fprintf(&b, "%s: ", le.Cause.Error()) + + for i, line := range lines { + b.WriteString(strings.TrimSpace(line)) + if i != len(lines)-1 { + b.WriteString(": ") + } + } + + omitted := len(le.Log) - len(lines) + if omitted == 0 && !le.Truncated { + return b.String() + } + + b.WriteString(" (") + if le.Truncated { + b.WriteString("truncated") + } + + if omitted > 0 { + if le.Truncated { + b.WriteString(", ") + } + fmt.Fprintf(&b, "%d line(s) omitted", omitted) + } + b.WriteString(")") + + return b.String() +} + +// includePreviousLine returns true if the given line likely is better +// understood with additional context from the preceding line. +func includePreviousLine(line string) bool { + // We need to find a good trade off between understandable error messages + // and too much complexity here. Checking the string prefix is ok, requiring + // regular expressions to do it is probably overkill. + + if strings.HasPrefix(line, "\t") { + // [13] STRUCT drm_rect size=16 vlen=4 + // \tx1 type_id=2 + return true + } + + if len(line) >= 2 && line[0] == 'R' && line[1] >= '0' && line[1] <= '9' { + // 0: (95) exit + // R0 !read_ok + return true } - return fmt.Sprintf("%s: %s", le.cause, le.log) + if strings.HasPrefix(line, "invalid bpf_context access") { + // 0: (79) r6 = *(u64 *)(r1 +0) + // func '__x64_sys_recvfrom' arg0 type FWD is not a struct + // invalid bpf_context access off=0 size=8 + return true + } + + return false } -// CString turns a NUL / zero terminated byte buffer into a string. -func CString(in []byte) string { - inLen := bytes.IndexByte(in, 0) - if inLen == -1 { - return "" +// Format the error. +// +// Understood verbs are %s and %v, which are equivalent to calling Error(). %v +// allows outputting additional information using the following flags: +// +// + Output the first lines, or all lines if no width is given. +// - Output the last lines, or all lines if no width is given. 
+// +// Use width to specify how many lines to output. Use the '-' flag to output +// lines from the end of the log instead of the beginning. +func (le *VerifierError) Format(f fmt.State, verb rune) { + switch verb { + case 's': + _, _ = io.WriteString(f, le.Error()) + + case 'v': + n, haveWidth := f.Width() + if !haveWidth || n > len(le.Log) { + n = len(le.Log) + } + + if !f.Flag('+') && !f.Flag('-') { + if haveWidth { + _, _ = io.WriteString(f, "%!v(BADWIDTH)") + return + } + + _, _ = io.WriteString(f, le.Error()) + return + } + + if f.Flag('+') && f.Flag('-') { + _, _ = io.WriteString(f, "%!v(BADFLAG)") + return + } + + fmt.Fprintf(f, "%s:", le.Cause.Error()) + + omitted := len(le.Log) - n + lines := le.Log[:n] + if f.Flag('-') { + // Print last instead of first lines. + lines = le.Log[len(le.Log)-n:] + if omitted > 0 { + fmt.Fprintf(f, "\n\t(%d line(s) omitted)", omitted) + } + } + + for _, line := range lines { + fmt.Fprintf(f, "\n\t%s", line) + } + + if !f.Flag('-') { + if omitted > 0 { + fmt.Fprintf(f, "\n\t(%d line(s) omitted)", omitted) + } + } + + if le.Truncated { + fmt.Fprintf(f, "\n\t(truncated)") + } + + default: + fmt.Fprintf(f, "%%!%c(BADVERB)", verb) } - return string(in[:inLen]) } diff --git a/agent/vendor/github.com/cilium/ebpf/internal/fd.go b/agent/vendor/github.com/cilium/ebpf/internal/fd.go deleted file mode 100644 index af04955bd53..00000000000 --- a/agent/vendor/github.com/cilium/ebpf/internal/fd.go +++ /dev/null @@ -1,69 +0,0 @@ -package internal - -import ( - "errors" - "fmt" - "os" - "runtime" - "strconv" - - "github.com/cilium/ebpf/internal/unix" -) - -var ErrClosedFd = errors.New("use of closed file descriptor") - -type FD struct { - raw int64 -} - -func NewFD(value uint32) *FD { - fd := &FD{int64(value)} - runtime.SetFinalizer(fd, (*FD).Close) - return fd -} - -func (fd *FD) String() string { - return strconv.FormatInt(fd.raw, 10) -} - -func (fd *FD) Value() (uint32, error) { - if fd.raw < 0 { - return 0, ErrClosedFd - } - - return uint32(fd.raw), nil -} - -func (fd *FD) Close() error { - if fd.raw < 0 { - return nil - } - - value := int(fd.raw) - fd.raw = -1 - - fd.Forget() - return unix.Close(value) -} - -func (fd *FD) Forget() { - runtime.SetFinalizer(fd, nil) -} - -func (fd *FD) Dup() (*FD, error) { - if fd.raw < 0 { - return nil, ErrClosedFd - } - - dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 0) - if err != nil { - return nil, fmt.Errorf("can't dup fd: %v", err) - } - - return NewFD(uint32(dup)), nil -} - -func (fd *FD) File(name string) *os.File { - fd.Forget() - return os.NewFile(uintptr(fd.raw), name) -} diff --git a/agent/vendor/github.com/cilium/ebpf/internal/feature.go b/agent/vendor/github.com/cilium/ebpf/internal/feature.go index c94a2e1ee01..0a6c2d1d528 100644 --- a/agent/vendor/github.com/cilium/ebpf/internal/feature.go +++ b/agent/vendor/github.com/cilium/ebpf/internal/feature.go @@ -54,11 +54,6 @@ type FeatureTestFn func() error // // Returns an error wrapping ErrNotSupported if the feature is not supported. 
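// Illustrative usage sketch for the FeatureTest helper defined directly below;
// it is not part of the vendored patch. The feature name, minimum version and
// probe body are hypothetical.
var haveExampleFeature = FeatureTest("example feature", "5.4", func() error {
	// Probe the kernel once. Return ErrNotSupported (or an error wrapping it)
	// when the kernel rejects the feature, nil when the probe succeeds.
	return ErrNotSupported
})

// Callers then gate code paths on the memoized result:
//
//	if err := haveExampleFeature(); err != nil {
//		// err wraps ErrNotSupported and carries the name and minimum version
//	}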
func FeatureTest(name, version string, fn FeatureTestFn) func() error { - v, err := NewVersion(version) - if err != nil { - return func() error { return err } - } - ft := new(featureTest) return func() error { ft.RLock() @@ -79,6 +74,11 @@ func FeatureTest(name, version string, fn FeatureTestFn) func() error { err := fn() switch { case errors.Is(err, ErrNotSupported): + v, err := NewVersion(version) + if err != nil { + return err + } + ft.result = &UnsupportedFeatureError{ MinimumVersion: v, Name: name, diff --git a/agent/vendor/github.com/cilium/ebpf/internal/io.go b/agent/vendor/github.com/cilium/ebpf/internal/io.go index fa7402782d7..30b6641f076 100644 --- a/agent/vendor/github.com/cilium/ebpf/internal/io.go +++ b/agent/vendor/github.com/cilium/ebpf/internal/io.go @@ -1,6 +1,35 @@ package internal -import "errors" +import ( + "bufio" + "compress/gzip" + "errors" + "io" + "os" +) + +// NewBufferedSectionReader wraps an io.ReaderAt in an appropriately-sized +// buffered reader. It is a convenience function for reading subsections of +// ELF sections while minimizing the amount of read() syscalls made. +// +// Syscall overhead is non-negligible in continuous integration context +// where ELFs might be accessed over virtual filesystems with poor random +// access performance. Buffering reads makes sense because (sub)sections +// end up being read completely anyway. +// +// Use instead of the r.Seek() + io.LimitReader() pattern. +func NewBufferedSectionReader(ra io.ReaderAt, off, n int64) *bufio.Reader { + // Clamp the size of the buffer to one page to avoid slurping large parts + // of a file into memory. bufio.NewReader uses a hardcoded default buffer + // of 4096. Allow arches with larger pages to allocate more, but don't + // allocate a fixed 4k buffer if we only need to read a small segment. + buf := n + if ps := int64(os.Getpagesize()); n > ps { + buf = ps + } + + return bufio.NewReaderSize(io.NewSectionReader(ra, off, n), int(buf)) +} // DiscardZeroes makes sure that all written bytes are zero // before discarding them. @@ -14,3 +43,20 @@ func (DiscardZeroes) Write(p []byte) (int, error) { } return len(p), nil } + +// ReadAllCompressed decompresses a gzipped file into memory. +func ReadAllCompressed(file string) ([]byte, error) { + fh, err := os.Open(file) + if err != nil { + return nil, err + } + defer fh.Close() + + gz, err := gzip.NewReader(fh) + if err != nil { + return nil, err + } + defer gz.Close() + + return io.ReadAll(gz) +} diff --git a/agent/vendor/github.com/cilium/ebpf/internal/output.go b/agent/vendor/github.com/cilium/ebpf/internal/output.go new file mode 100644 index 00000000000..aeab37fcfaf --- /dev/null +++ b/agent/vendor/github.com/cilium/ebpf/internal/output.go @@ -0,0 +1,84 @@ +package internal + +import ( + "bytes" + "errors" + "go/format" + "go/scanner" + "io" + "strings" + "unicode" +) + +// Identifier turns a C style type or field name into an exportable Go equivalent. +func Identifier(str string) string { + prev := rune(-1) + return strings.Map(func(r rune) rune { + // See https://golang.org/ref/spec#Identifiers + switch { + case unicode.IsLetter(r): + if prev == -1 { + r = unicode.ToUpper(r) + } + + case r == '_': + switch { + // The previous rune was deleted, or we are at the + // beginning of the string. + case prev == -1: + fallthrough + + // The previous rune is a lower case letter or a digit. 
+ case unicode.IsDigit(prev) || (unicode.IsLetter(prev) && unicode.IsLower(prev)): + // delete the current rune, and force the + // next character to be uppercased. + r = -1 + } + + case unicode.IsDigit(r): + + default: + // Delete the current rune. prev is unchanged. + return -1 + } + + prev = r + return r + }, str) +} + +// WriteFormatted outputs a formatted src into out. +// +// If formatting fails it returns an informative error message. +func WriteFormatted(src []byte, out io.Writer) error { + formatted, err := format.Source(src) + if err == nil { + _, err = out.Write(formatted) + return err + } + + var el scanner.ErrorList + if !errors.As(err, &el) { + return err + } + + var nel scanner.ErrorList + for _, err := range el { + if !err.Pos.IsValid() { + nel = append(nel, err) + continue + } + + buf := src[err.Pos.Offset:] + nl := bytes.IndexRune(buf, '\n') + if nl == -1 { + nel = append(nel, err) + continue + } + + err.Msg += ": " + string(buf[:nl]) + nel = append(nel, err) + } + + return nel +} diff --git a/agent/vendor/github.com/cilium/ebpf/internal/pinning.go b/agent/vendor/github.com/cilium/ebpf/internal/pinning.go index 5329b432d72..c711353c3ea 100644 --- a/agent/vendor/github.com/cilium/ebpf/internal/pinning.go +++ b/agent/vendor/github.com/cilium/ebpf/internal/pinning.go @@ -4,24 +4,54 @@ import ( "errors" "fmt" "os" + "path/filepath" + "runtime" + "unsafe" + "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) -func Pin(currentPath, newPath string, fd *FD) error { +func Pin(currentPath, newPath string, fd *sys.FD) error { + const bpfFSType = 0xcafe4a11 + if newPath == "" { return errors.New("given pinning path cannot be empty") } if currentPath == newPath { return nil } + + var statfs unix.Statfs_t + if err := unix.Statfs(filepath.Dir(newPath), &statfs); err != nil { + return err + } + + fsType := int64(statfs.Type) + if unsafe.Sizeof(statfs.Type) == 4 { + // We're on a 32 bit arch, where statfs.Type is int32. bpfFSType is a + // negative number when interpreted as int32 so we need to cast via + // uint32 to avoid sign extension. + fsType = int64(uint32(statfs.Type)) + } + + if fsType != bpfFSType { + return fmt.Errorf("%s is not on a bpf filesystem", newPath) + } + + defer runtime.KeepAlive(fd) + if currentPath == "" { - return BPFObjPin(newPath, fd) + return sys.ObjPin(&sys.ObjPinAttr{ + Pathname: sys.NewStringPointer(newPath), + BpfFd: fd.Uint(), + }) } - var err error + // Renameat2 is used instead of os.Rename to disallow the new path replacing // an existing path. - if err = unix.Renameat2(unix.AT_FDCWD, currentPath, unix.AT_FDCWD, newPath, unix.RENAME_NOREPLACE); err == nil { + err := unix.Renameat2(unix.AT_FDCWD, currentPath, unix.AT_FDCWD, newPath, unix.RENAME_NOREPLACE) + if err == nil { // Object is now moved to the new pinning path. return nil } @@ -29,7 +59,10 @@ func Pin(currentPath, newPath string, fd *FD) error { return fmt.Errorf("unable to move pinned object to new path %v: %w", newPath, err) } // Internal state not in sync with the file system so let's fix it. 
- return BPFObjPin(newPath, fd) + return sys.ObjPin(&sys.ObjPinAttr{ + Pathname: sys.NewStringPointer(newPath), + BpfFd: fd.Uint(), + }) } func Unpin(pinnedPath string) error { diff --git a/agent/vendor/github.com/cilium/ebpf/internal/ptr_64.go b/agent/vendor/github.com/cilium/ebpf/internal/ptr_64.go deleted file mode 100644 index 69452dceb9a..00000000000 --- a/agent/vendor/github.com/cilium/ebpf/internal/ptr_64.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !386,!amd64p32,!arm,!mipsle,!mips64p32le -// +build !armbe,!mips,!mips64p32 - -package internal - -import ( - "unsafe" -) - -// Pointer wraps an unsafe.Pointer to be 64bit to -// conform to the syscall specification. -type Pointer struct { - ptr unsafe.Pointer -} diff --git a/agent/vendor/github.com/cilium/ebpf/internal/sys/doc.go b/agent/vendor/github.com/cilium/ebpf/internal/sys/doc.go new file mode 100644 index 00000000000..dfe174448e1 --- /dev/null +++ b/agent/vendor/github.com/cilium/ebpf/internal/sys/doc.go @@ -0,0 +1,6 @@ +// Package sys contains bindings for the BPF syscall. +package sys + +// Regenerate types.go by invoking go generate in the current directory. + +//go:generate go run github.com/cilium/ebpf/internal/cmd/gentypes ../../btf/testdata/vmlinux.btf.gz diff --git a/agent/vendor/github.com/cilium/ebpf/internal/sys/fd.go b/agent/vendor/github.com/cilium/ebpf/internal/sys/fd.go new file mode 100644 index 00000000000..65517d45e26 --- /dev/null +++ b/agent/vendor/github.com/cilium/ebpf/internal/sys/fd.go @@ -0,0 +1,96 @@ +package sys + +import ( + "fmt" + "math" + "os" + "runtime" + "strconv" + + "github.com/cilium/ebpf/internal/unix" +) + +var ErrClosedFd = unix.EBADF + +type FD struct { + raw int +} + +func newFD(value int) *FD { + fd := &FD{value} + runtime.SetFinalizer(fd, (*FD).Close) + return fd +} + +// NewFD wraps a raw fd with a finalizer. +// +// You must not use the raw fd after calling this function, since the underlying +// file descriptor number may change. This is because the BPF UAPI assumes that +// zero is not a valid fd value. +func NewFD(value int) (*FD, error) { + if value < 0 { + return nil, fmt.Errorf("invalid fd %d", value) + } + + fd := newFD(value) + if value != 0 { + return fd, nil + } + + dup, err := fd.Dup() + _ = fd.Close() + return dup, err +} + +func (fd *FD) String() string { + return strconv.FormatInt(int64(fd.raw), 10) +} + +func (fd *FD) Int() int { + return fd.raw +} + +func (fd *FD) Uint() uint32 { + if fd.raw < 0 || int64(fd.raw) > math.MaxUint32 { + // Best effort: this is the number most likely to be an invalid file + // descriptor. It is equal to -1 (on two's complement arches). + return math.MaxUint32 + } + return uint32(fd.raw) +} + +func (fd *FD) Close() error { + if fd.raw < 0 { + return nil + } + + value := int(fd.raw) + fd.raw = -1 + + fd.Forget() + return unix.Close(value) +} + +func (fd *FD) Forget() { + runtime.SetFinalizer(fd, nil) +} + +func (fd *FD) Dup() (*FD, error) { + if fd.raw < 0 { + return nil, ErrClosedFd + } + + // Always require the fd to be larger than zero: the BPF API treats the value + // as "no argument provided". 
+ dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 1) + if err != nil { + return nil, fmt.Errorf("can't dup fd: %v", err) + } + + return newFD(dup), nil +} + +func (fd *FD) File(name string) *os.File { + fd.Forget() + return os.NewFile(uintptr(fd.raw), name) +} diff --git a/agent/vendor/github.com/cilium/ebpf/internal/ptr.go b/agent/vendor/github.com/cilium/ebpf/internal/sys/ptr.go similarity index 71% rename from agent/vendor/github.com/cilium/ebpf/internal/ptr.go rename to agent/vendor/github.com/cilium/ebpf/internal/sys/ptr.go index f295de72cfe..a221006888d 100644 --- a/agent/vendor/github.com/cilium/ebpf/internal/ptr.go +++ b/agent/vendor/github.com/cilium/ebpf/internal/sys/ptr.go @@ -1,4 +1,4 @@ -package internal +package sys import ( "unsafe" @@ -20,6 +20,13 @@ func NewSlicePointer(buf []byte) Pointer { return Pointer{ptr: unsafe.Pointer(&buf[0])} } +// NewSlicePointer creates a 64-bit pointer from a byte slice. +// +// Useful to assign both the pointer and the length in one go. +func NewSlicePointerLen(buf []byte) (Pointer, uint32) { + return NewSlicePointer(buf), uint32(len(buf)) +} + // NewStringPointer creates a 64-bit pointer from a string. func NewStringPointer(str string) Pointer { p, err := unix.BytePtrFromString(str) diff --git a/agent/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go b/agent/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go similarity index 80% rename from agent/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go rename to agent/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go index a56fbcc8e01..df903d780b1 100644 --- a/agent/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go +++ b/agent/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go @@ -1,6 +1,7 @@ +//go:build armbe || mips || mips64p32 // +build armbe mips mips64p32 -package internal +package sys import ( "unsafe" diff --git a/agent/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go b/agent/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go similarity index 75% rename from agent/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go rename to agent/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go index be2ecfca731..a6a51edb6e1 100644 --- a/agent/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go +++ b/agent/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go @@ -1,6 +1,7 @@ +//go:build 386 || amd64p32 || arm || mipsle || mips64p32le // +build 386 amd64p32 arm mipsle mips64p32le -package internal +package sys import ( "unsafe" diff --git a/agent/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go b/agent/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go new file mode 100644 index 00000000000..7c0279e487c --- /dev/null +++ b/agent/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go @@ -0,0 +1,14 @@ +//go:build !386 && !amd64p32 && !arm && !mipsle && !mips64p32le && !armbe && !mips && !mips64p32 +// +build !386,!amd64p32,!arm,!mipsle,!mips64p32le,!armbe,!mips,!mips64p32 + +package sys + +import ( + "unsafe" +) + +// Pointer wraps an unsafe.Pointer to be 64bit to +// conform to the syscall specification. 
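// Illustrative sketch for the FD wrapper added in sys/fd.go above; it is not
// part of the vendored patch. rawValue stands in for a descriptor returned by
// a raw bpf(2) call made elsewhere.
func wrapRawFD(rawValue int) error {
	fd, err := NewFD(rawValue) // descriptor 0 is dup'd so the wrapped value is never zero
	if err != nil {
		return err
	}
	defer fd.Close()

	_ = fd.Uint() // the uint32 form expected by the generated attribute structs
	return nil
}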
+type Pointer struct { + ptr unsafe.Pointer +} diff --git a/agent/vendor/github.com/cilium/ebpf/internal/sys/syscall.go b/agent/vendor/github.com/cilium/ebpf/internal/sys/syscall.go new file mode 100644 index 00000000000..2a5935dc912 --- /dev/null +++ b/agent/vendor/github.com/cilium/ebpf/internal/sys/syscall.go @@ -0,0 +1,126 @@ +package sys + +import ( + "runtime" + "syscall" + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +// BPF wraps SYS_BPF. +// +// Any pointers contained in attr must use the Pointer type from this package. +func BPF(cmd Cmd, attr unsafe.Pointer, size uintptr) (uintptr, error) { + for { + r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size) + runtime.KeepAlive(attr) + + // As of ~4.20 the verifier can be interrupted by a signal, + // and returns EAGAIN in that case. + if errNo == unix.EAGAIN && cmd == BPF_PROG_LOAD { + continue + } + + var err error + if errNo != 0 { + err = wrappedErrno{errNo} + } + + return r1, err + } +} + +// Info is implemented by all structs that can be passed to the ObjInfo syscall. +// +// MapInfo +// ProgInfo +// LinkInfo +// BtfInfo +type Info interface { + info() (unsafe.Pointer, uint32) +} + +var _ Info = (*MapInfo)(nil) + +func (i *MapInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +var _ Info = (*ProgInfo)(nil) + +func (i *ProgInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +var _ Info = (*LinkInfo)(nil) + +func (i *LinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +var _ Info = (*BtfInfo)(nil) + +func (i *BtfInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +// ObjInfo retrieves information about a BPF Fd. +// +// info may be one of MapInfo, ProgInfo, LinkInfo and BtfInfo. +func ObjInfo(fd *FD, info Info) error { + ptr, len := info.info() + err := ObjGetInfoByFd(&ObjGetInfoByFdAttr{ + BpfFd: fd.Uint(), + InfoLen: len, + Info: NewPointer(ptr), + }) + runtime.KeepAlive(fd) + return err +} + +// BPFObjName is a null-terminated string made up of +// 'A-Za-z0-9_' characters. +type ObjName [unix.BPF_OBJ_NAME_LEN]byte + +// NewObjName truncates the result if it is too long. +func NewObjName(name string) ObjName { + var result ObjName + copy(result[:unix.BPF_OBJ_NAME_LEN-1], name) + return result +} + +// LinkID uniquely identifies a bpf_link. +type LinkID uint32 + +// BTFID uniquely identifies a BTF blob loaded into the kernel. +type BTFID uint32 + +// wrappedErrno wraps syscall.Errno to prevent direct comparisons with +// syscall.E* or unix.E* constants. +// +// You should never export an error of this type. +type wrappedErrno struct { + syscall.Errno +} + +func (we wrappedErrno) Unwrap() error { + return we.Errno +} + +type syscallError struct { + error + errno syscall.Errno +} + +func Error(err error, errno syscall.Errno) error { + return &syscallError{err, errno} +} + +func (se *syscallError) Is(target error) bool { + return target == se.error +} + +func (se *syscallError) Unwrap() error { + return se.errno +} diff --git a/agent/vendor/github.com/cilium/ebpf/internal/sys/types.go b/agent/vendor/github.com/cilium/ebpf/internal/sys/types.go new file mode 100644 index 00000000000..291e3a6196c --- /dev/null +++ b/agent/vendor/github.com/cilium/ebpf/internal/sys/types.go @@ -0,0 +1,1052 @@ +// Code generated by internal/cmd/gentypes; DO NOT EDIT. 
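// Illustrative sketch of how the generated wrappers in the file below are
// typically called; it is not part of the generated code. The sizes and map
// name are hypothetical.
func createExampleArray() (*FD, error) {
	return MapCreate(&MapCreateAttr{
		MapType:    BPF_MAP_TYPE_ARRAY,
		KeySize:    4, // array keys are 4-byte indices
		ValueSize:  8,
		MaxEntries: 1,
		MapName:    NewObjName("example"),
	})
}
// The returned *FD owns the descriptor; release it with fd.Close() when done.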
+ +package sys + +import ( + "unsafe" +) + +type AdjRoomMode int32 + +const ( + BPF_ADJ_ROOM_NET AdjRoomMode = 0 + BPF_ADJ_ROOM_MAC AdjRoomMode = 1 +) + +type AttachType int32 + +const ( + BPF_CGROUP_INET_INGRESS AttachType = 0 + BPF_CGROUP_INET_EGRESS AttachType = 1 + BPF_CGROUP_INET_SOCK_CREATE AttachType = 2 + BPF_CGROUP_SOCK_OPS AttachType = 3 + BPF_SK_SKB_STREAM_PARSER AttachType = 4 + BPF_SK_SKB_STREAM_VERDICT AttachType = 5 + BPF_CGROUP_DEVICE AttachType = 6 + BPF_SK_MSG_VERDICT AttachType = 7 + BPF_CGROUP_INET4_BIND AttachType = 8 + BPF_CGROUP_INET6_BIND AttachType = 9 + BPF_CGROUP_INET4_CONNECT AttachType = 10 + BPF_CGROUP_INET6_CONNECT AttachType = 11 + BPF_CGROUP_INET4_POST_BIND AttachType = 12 + BPF_CGROUP_INET6_POST_BIND AttachType = 13 + BPF_CGROUP_UDP4_SENDMSG AttachType = 14 + BPF_CGROUP_UDP6_SENDMSG AttachType = 15 + BPF_LIRC_MODE2 AttachType = 16 + BPF_FLOW_DISSECTOR AttachType = 17 + BPF_CGROUP_SYSCTL AttachType = 18 + BPF_CGROUP_UDP4_RECVMSG AttachType = 19 + BPF_CGROUP_UDP6_RECVMSG AttachType = 20 + BPF_CGROUP_GETSOCKOPT AttachType = 21 + BPF_CGROUP_SETSOCKOPT AttachType = 22 + BPF_TRACE_RAW_TP AttachType = 23 + BPF_TRACE_FENTRY AttachType = 24 + BPF_TRACE_FEXIT AttachType = 25 + BPF_MODIFY_RETURN AttachType = 26 + BPF_LSM_MAC AttachType = 27 + BPF_TRACE_ITER AttachType = 28 + BPF_CGROUP_INET4_GETPEERNAME AttachType = 29 + BPF_CGROUP_INET6_GETPEERNAME AttachType = 30 + BPF_CGROUP_INET4_GETSOCKNAME AttachType = 31 + BPF_CGROUP_INET6_GETSOCKNAME AttachType = 32 + BPF_XDP_DEVMAP AttachType = 33 + BPF_CGROUP_INET_SOCK_RELEASE AttachType = 34 + BPF_XDP_CPUMAP AttachType = 35 + BPF_SK_LOOKUP AttachType = 36 + BPF_XDP AttachType = 37 + BPF_SK_SKB_VERDICT AttachType = 38 + BPF_SK_REUSEPORT_SELECT AttachType = 39 + BPF_SK_REUSEPORT_SELECT_OR_MIGRATE AttachType = 40 + BPF_PERF_EVENT AttachType = 41 + BPF_TRACE_KPROBE_MULTI AttachType = 42 + __MAX_BPF_ATTACH_TYPE AttachType = 43 +) + +type Cmd int32 + +const ( + BPF_MAP_CREATE Cmd = 0 + BPF_MAP_LOOKUP_ELEM Cmd = 1 + BPF_MAP_UPDATE_ELEM Cmd = 2 + BPF_MAP_DELETE_ELEM Cmd = 3 + BPF_MAP_GET_NEXT_KEY Cmd = 4 + BPF_PROG_LOAD Cmd = 5 + BPF_OBJ_PIN Cmd = 6 + BPF_OBJ_GET Cmd = 7 + BPF_PROG_ATTACH Cmd = 8 + BPF_PROG_DETACH Cmd = 9 + BPF_PROG_TEST_RUN Cmd = 10 + BPF_PROG_RUN Cmd = 10 + BPF_PROG_GET_NEXT_ID Cmd = 11 + BPF_MAP_GET_NEXT_ID Cmd = 12 + BPF_PROG_GET_FD_BY_ID Cmd = 13 + BPF_MAP_GET_FD_BY_ID Cmd = 14 + BPF_OBJ_GET_INFO_BY_FD Cmd = 15 + BPF_PROG_QUERY Cmd = 16 + BPF_RAW_TRACEPOINT_OPEN Cmd = 17 + BPF_BTF_LOAD Cmd = 18 + BPF_BTF_GET_FD_BY_ID Cmd = 19 + BPF_TASK_FD_QUERY Cmd = 20 + BPF_MAP_LOOKUP_AND_DELETE_ELEM Cmd = 21 + BPF_MAP_FREEZE Cmd = 22 + BPF_BTF_GET_NEXT_ID Cmd = 23 + BPF_MAP_LOOKUP_BATCH Cmd = 24 + BPF_MAP_LOOKUP_AND_DELETE_BATCH Cmd = 25 + BPF_MAP_UPDATE_BATCH Cmd = 26 + BPF_MAP_DELETE_BATCH Cmd = 27 + BPF_LINK_CREATE Cmd = 28 + BPF_LINK_UPDATE Cmd = 29 + BPF_LINK_GET_FD_BY_ID Cmd = 30 + BPF_LINK_GET_NEXT_ID Cmd = 31 + BPF_ENABLE_STATS Cmd = 32 + BPF_ITER_CREATE Cmd = 33 + BPF_LINK_DETACH Cmd = 34 + BPF_PROG_BIND_MAP Cmd = 35 +) + +type FunctionId int32 + +const ( + BPF_FUNC_unspec FunctionId = 0 + BPF_FUNC_map_lookup_elem FunctionId = 1 + BPF_FUNC_map_update_elem FunctionId = 2 + BPF_FUNC_map_delete_elem FunctionId = 3 + BPF_FUNC_probe_read FunctionId = 4 + BPF_FUNC_ktime_get_ns FunctionId = 5 + BPF_FUNC_trace_printk FunctionId = 6 + BPF_FUNC_get_prandom_u32 FunctionId = 7 + BPF_FUNC_get_smp_processor_id FunctionId = 8 + BPF_FUNC_skb_store_bytes FunctionId = 9 + BPF_FUNC_l3_csum_replace FunctionId = 10 + 
BPF_FUNC_l4_csum_replace FunctionId = 11 + BPF_FUNC_tail_call FunctionId = 12 + BPF_FUNC_clone_redirect FunctionId = 13 + BPF_FUNC_get_current_pid_tgid FunctionId = 14 + BPF_FUNC_get_current_uid_gid FunctionId = 15 + BPF_FUNC_get_current_comm FunctionId = 16 + BPF_FUNC_get_cgroup_classid FunctionId = 17 + BPF_FUNC_skb_vlan_push FunctionId = 18 + BPF_FUNC_skb_vlan_pop FunctionId = 19 + BPF_FUNC_skb_get_tunnel_key FunctionId = 20 + BPF_FUNC_skb_set_tunnel_key FunctionId = 21 + BPF_FUNC_perf_event_read FunctionId = 22 + BPF_FUNC_redirect FunctionId = 23 + BPF_FUNC_get_route_realm FunctionId = 24 + BPF_FUNC_perf_event_output FunctionId = 25 + BPF_FUNC_skb_load_bytes FunctionId = 26 + BPF_FUNC_get_stackid FunctionId = 27 + BPF_FUNC_csum_diff FunctionId = 28 + BPF_FUNC_skb_get_tunnel_opt FunctionId = 29 + BPF_FUNC_skb_set_tunnel_opt FunctionId = 30 + BPF_FUNC_skb_change_proto FunctionId = 31 + BPF_FUNC_skb_change_type FunctionId = 32 + BPF_FUNC_skb_under_cgroup FunctionId = 33 + BPF_FUNC_get_hash_recalc FunctionId = 34 + BPF_FUNC_get_current_task FunctionId = 35 + BPF_FUNC_probe_write_user FunctionId = 36 + BPF_FUNC_current_task_under_cgroup FunctionId = 37 + BPF_FUNC_skb_change_tail FunctionId = 38 + BPF_FUNC_skb_pull_data FunctionId = 39 + BPF_FUNC_csum_update FunctionId = 40 + BPF_FUNC_set_hash_invalid FunctionId = 41 + BPF_FUNC_get_numa_node_id FunctionId = 42 + BPF_FUNC_skb_change_head FunctionId = 43 + BPF_FUNC_xdp_adjust_head FunctionId = 44 + BPF_FUNC_probe_read_str FunctionId = 45 + BPF_FUNC_get_socket_cookie FunctionId = 46 + BPF_FUNC_get_socket_uid FunctionId = 47 + BPF_FUNC_set_hash FunctionId = 48 + BPF_FUNC_setsockopt FunctionId = 49 + BPF_FUNC_skb_adjust_room FunctionId = 50 + BPF_FUNC_redirect_map FunctionId = 51 + BPF_FUNC_sk_redirect_map FunctionId = 52 + BPF_FUNC_sock_map_update FunctionId = 53 + BPF_FUNC_xdp_adjust_meta FunctionId = 54 + BPF_FUNC_perf_event_read_value FunctionId = 55 + BPF_FUNC_perf_prog_read_value FunctionId = 56 + BPF_FUNC_getsockopt FunctionId = 57 + BPF_FUNC_override_return FunctionId = 58 + BPF_FUNC_sock_ops_cb_flags_set FunctionId = 59 + BPF_FUNC_msg_redirect_map FunctionId = 60 + BPF_FUNC_msg_apply_bytes FunctionId = 61 + BPF_FUNC_msg_cork_bytes FunctionId = 62 + BPF_FUNC_msg_pull_data FunctionId = 63 + BPF_FUNC_bind FunctionId = 64 + BPF_FUNC_xdp_adjust_tail FunctionId = 65 + BPF_FUNC_skb_get_xfrm_state FunctionId = 66 + BPF_FUNC_get_stack FunctionId = 67 + BPF_FUNC_skb_load_bytes_relative FunctionId = 68 + BPF_FUNC_fib_lookup FunctionId = 69 + BPF_FUNC_sock_hash_update FunctionId = 70 + BPF_FUNC_msg_redirect_hash FunctionId = 71 + BPF_FUNC_sk_redirect_hash FunctionId = 72 + BPF_FUNC_lwt_push_encap FunctionId = 73 + BPF_FUNC_lwt_seg6_store_bytes FunctionId = 74 + BPF_FUNC_lwt_seg6_adjust_srh FunctionId = 75 + BPF_FUNC_lwt_seg6_action FunctionId = 76 + BPF_FUNC_rc_repeat FunctionId = 77 + BPF_FUNC_rc_keydown FunctionId = 78 + BPF_FUNC_skb_cgroup_id FunctionId = 79 + BPF_FUNC_get_current_cgroup_id FunctionId = 80 + BPF_FUNC_get_local_storage FunctionId = 81 + BPF_FUNC_sk_select_reuseport FunctionId = 82 + BPF_FUNC_skb_ancestor_cgroup_id FunctionId = 83 + BPF_FUNC_sk_lookup_tcp FunctionId = 84 + BPF_FUNC_sk_lookup_udp FunctionId = 85 + BPF_FUNC_sk_release FunctionId = 86 + BPF_FUNC_map_push_elem FunctionId = 87 + BPF_FUNC_map_pop_elem FunctionId = 88 + BPF_FUNC_map_peek_elem FunctionId = 89 + BPF_FUNC_msg_push_data FunctionId = 90 + BPF_FUNC_msg_pop_data FunctionId = 91 + BPF_FUNC_rc_pointer_rel FunctionId = 92 + BPF_FUNC_spin_lock FunctionId = 93 + 
BPF_FUNC_spin_unlock FunctionId = 94 + BPF_FUNC_sk_fullsock FunctionId = 95 + BPF_FUNC_tcp_sock FunctionId = 96 + BPF_FUNC_skb_ecn_set_ce FunctionId = 97 + BPF_FUNC_get_listener_sock FunctionId = 98 + BPF_FUNC_skc_lookup_tcp FunctionId = 99 + BPF_FUNC_tcp_check_syncookie FunctionId = 100 + BPF_FUNC_sysctl_get_name FunctionId = 101 + BPF_FUNC_sysctl_get_current_value FunctionId = 102 + BPF_FUNC_sysctl_get_new_value FunctionId = 103 + BPF_FUNC_sysctl_set_new_value FunctionId = 104 + BPF_FUNC_strtol FunctionId = 105 + BPF_FUNC_strtoul FunctionId = 106 + BPF_FUNC_sk_storage_get FunctionId = 107 + BPF_FUNC_sk_storage_delete FunctionId = 108 + BPF_FUNC_send_signal FunctionId = 109 + BPF_FUNC_tcp_gen_syncookie FunctionId = 110 + BPF_FUNC_skb_output FunctionId = 111 + BPF_FUNC_probe_read_user FunctionId = 112 + BPF_FUNC_probe_read_kernel FunctionId = 113 + BPF_FUNC_probe_read_user_str FunctionId = 114 + BPF_FUNC_probe_read_kernel_str FunctionId = 115 + BPF_FUNC_tcp_send_ack FunctionId = 116 + BPF_FUNC_send_signal_thread FunctionId = 117 + BPF_FUNC_jiffies64 FunctionId = 118 + BPF_FUNC_read_branch_records FunctionId = 119 + BPF_FUNC_get_ns_current_pid_tgid FunctionId = 120 + BPF_FUNC_xdp_output FunctionId = 121 + BPF_FUNC_get_netns_cookie FunctionId = 122 + BPF_FUNC_get_current_ancestor_cgroup_id FunctionId = 123 + BPF_FUNC_sk_assign FunctionId = 124 + BPF_FUNC_ktime_get_boot_ns FunctionId = 125 + BPF_FUNC_seq_printf FunctionId = 126 + BPF_FUNC_seq_write FunctionId = 127 + BPF_FUNC_sk_cgroup_id FunctionId = 128 + BPF_FUNC_sk_ancestor_cgroup_id FunctionId = 129 + BPF_FUNC_ringbuf_output FunctionId = 130 + BPF_FUNC_ringbuf_reserve FunctionId = 131 + BPF_FUNC_ringbuf_submit FunctionId = 132 + BPF_FUNC_ringbuf_discard FunctionId = 133 + BPF_FUNC_ringbuf_query FunctionId = 134 + BPF_FUNC_csum_level FunctionId = 135 + BPF_FUNC_skc_to_tcp6_sock FunctionId = 136 + BPF_FUNC_skc_to_tcp_sock FunctionId = 137 + BPF_FUNC_skc_to_tcp_timewait_sock FunctionId = 138 + BPF_FUNC_skc_to_tcp_request_sock FunctionId = 139 + BPF_FUNC_skc_to_udp6_sock FunctionId = 140 + BPF_FUNC_get_task_stack FunctionId = 141 + BPF_FUNC_load_hdr_opt FunctionId = 142 + BPF_FUNC_store_hdr_opt FunctionId = 143 + BPF_FUNC_reserve_hdr_opt FunctionId = 144 + BPF_FUNC_inode_storage_get FunctionId = 145 + BPF_FUNC_inode_storage_delete FunctionId = 146 + BPF_FUNC_d_path FunctionId = 147 + BPF_FUNC_copy_from_user FunctionId = 148 + BPF_FUNC_snprintf_btf FunctionId = 149 + BPF_FUNC_seq_printf_btf FunctionId = 150 + BPF_FUNC_skb_cgroup_classid FunctionId = 151 + BPF_FUNC_redirect_neigh FunctionId = 152 + BPF_FUNC_per_cpu_ptr FunctionId = 153 + BPF_FUNC_this_cpu_ptr FunctionId = 154 + BPF_FUNC_redirect_peer FunctionId = 155 + BPF_FUNC_task_storage_get FunctionId = 156 + BPF_FUNC_task_storage_delete FunctionId = 157 + BPF_FUNC_get_current_task_btf FunctionId = 158 + BPF_FUNC_bprm_opts_set FunctionId = 159 + BPF_FUNC_ktime_get_coarse_ns FunctionId = 160 + BPF_FUNC_ima_inode_hash FunctionId = 161 + BPF_FUNC_sock_from_file FunctionId = 162 + BPF_FUNC_check_mtu FunctionId = 163 + BPF_FUNC_for_each_map_elem FunctionId = 164 + BPF_FUNC_snprintf FunctionId = 165 + BPF_FUNC_sys_bpf FunctionId = 166 + BPF_FUNC_btf_find_by_name_kind FunctionId = 167 + BPF_FUNC_sys_close FunctionId = 168 + BPF_FUNC_timer_init FunctionId = 169 + BPF_FUNC_timer_set_callback FunctionId = 170 + BPF_FUNC_timer_start FunctionId = 171 + BPF_FUNC_timer_cancel FunctionId = 172 + BPF_FUNC_get_func_ip FunctionId = 173 + BPF_FUNC_get_attach_cookie FunctionId = 174 + BPF_FUNC_task_pt_regs 
FunctionId = 175 + BPF_FUNC_get_branch_snapshot FunctionId = 176 + BPF_FUNC_trace_vprintk FunctionId = 177 + BPF_FUNC_skc_to_unix_sock FunctionId = 178 + BPF_FUNC_kallsyms_lookup_name FunctionId = 179 + BPF_FUNC_find_vma FunctionId = 180 + BPF_FUNC_loop FunctionId = 181 + BPF_FUNC_strncmp FunctionId = 182 + BPF_FUNC_get_func_arg FunctionId = 183 + BPF_FUNC_get_func_ret FunctionId = 184 + BPF_FUNC_get_func_arg_cnt FunctionId = 185 + BPF_FUNC_get_retval FunctionId = 186 + BPF_FUNC_set_retval FunctionId = 187 + BPF_FUNC_xdp_get_buff_len FunctionId = 188 + BPF_FUNC_xdp_load_bytes FunctionId = 189 + BPF_FUNC_xdp_store_bytes FunctionId = 190 + BPF_FUNC_copy_from_user_task FunctionId = 191 + BPF_FUNC_skb_set_tstamp FunctionId = 192 + BPF_FUNC_ima_file_hash FunctionId = 193 + __BPF_FUNC_MAX_ID FunctionId = 194 +) + +type HdrStartOff int32 + +const ( + BPF_HDR_START_MAC HdrStartOff = 0 + BPF_HDR_START_NET HdrStartOff = 1 +) + +type LinkType int32 + +const ( + BPF_LINK_TYPE_UNSPEC LinkType = 0 + BPF_LINK_TYPE_RAW_TRACEPOINT LinkType = 1 + BPF_LINK_TYPE_TRACING LinkType = 2 + BPF_LINK_TYPE_CGROUP LinkType = 3 + BPF_LINK_TYPE_ITER LinkType = 4 + BPF_LINK_TYPE_NETNS LinkType = 5 + BPF_LINK_TYPE_XDP LinkType = 6 + BPF_LINK_TYPE_PERF_EVENT LinkType = 7 + BPF_LINK_TYPE_KPROBE_MULTI LinkType = 8 + MAX_BPF_LINK_TYPE LinkType = 9 +) + +type MapType int32 + +const ( + BPF_MAP_TYPE_UNSPEC MapType = 0 + BPF_MAP_TYPE_HASH MapType = 1 + BPF_MAP_TYPE_ARRAY MapType = 2 + BPF_MAP_TYPE_PROG_ARRAY MapType = 3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY MapType = 4 + BPF_MAP_TYPE_PERCPU_HASH MapType = 5 + BPF_MAP_TYPE_PERCPU_ARRAY MapType = 6 + BPF_MAP_TYPE_STACK_TRACE MapType = 7 + BPF_MAP_TYPE_CGROUP_ARRAY MapType = 8 + BPF_MAP_TYPE_LRU_HASH MapType = 9 + BPF_MAP_TYPE_LRU_PERCPU_HASH MapType = 10 + BPF_MAP_TYPE_LPM_TRIE MapType = 11 + BPF_MAP_TYPE_ARRAY_OF_MAPS MapType = 12 + BPF_MAP_TYPE_HASH_OF_MAPS MapType = 13 + BPF_MAP_TYPE_DEVMAP MapType = 14 + BPF_MAP_TYPE_SOCKMAP MapType = 15 + BPF_MAP_TYPE_CPUMAP MapType = 16 + BPF_MAP_TYPE_XSKMAP MapType = 17 + BPF_MAP_TYPE_SOCKHASH MapType = 18 + BPF_MAP_TYPE_CGROUP_STORAGE MapType = 19 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY MapType = 20 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE MapType = 21 + BPF_MAP_TYPE_QUEUE MapType = 22 + BPF_MAP_TYPE_STACK MapType = 23 + BPF_MAP_TYPE_SK_STORAGE MapType = 24 + BPF_MAP_TYPE_DEVMAP_HASH MapType = 25 + BPF_MAP_TYPE_STRUCT_OPS MapType = 26 + BPF_MAP_TYPE_RINGBUF MapType = 27 + BPF_MAP_TYPE_INODE_STORAGE MapType = 28 + BPF_MAP_TYPE_TASK_STORAGE MapType = 29 + BPF_MAP_TYPE_BLOOM_FILTER MapType = 30 +) + +type ProgType int32 + +const ( + BPF_PROG_TYPE_UNSPEC ProgType = 0 + BPF_PROG_TYPE_SOCKET_FILTER ProgType = 1 + BPF_PROG_TYPE_KPROBE ProgType = 2 + BPF_PROG_TYPE_SCHED_CLS ProgType = 3 + BPF_PROG_TYPE_SCHED_ACT ProgType = 4 + BPF_PROG_TYPE_TRACEPOINT ProgType = 5 + BPF_PROG_TYPE_XDP ProgType = 6 + BPF_PROG_TYPE_PERF_EVENT ProgType = 7 + BPF_PROG_TYPE_CGROUP_SKB ProgType = 8 + BPF_PROG_TYPE_CGROUP_SOCK ProgType = 9 + BPF_PROG_TYPE_LWT_IN ProgType = 10 + BPF_PROG_TYPE_LWT_OUT ProgType = 11 + BPF_PROG_TYPE_LWT_XMIT ProgType = 12 + BPF_PROG_TYPE_SOCK_OPS ProgType = 13 + BPF_PROG_TYPE_SK_SKB ProgType = 14 + BPF_PROG_TYPE_CGROUP_DEVICE ProgType = 15 + BPF_PROG_TYPE_SK_MSG ProgType = 16 + BPF_PROG_TYPE_RAW_TRACEPOINT ProgType = 17 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR ProgType = 18 + BPF_PROG_TYPE_LWT_SEG6LOCAL ProgType = 19 + BPF_PROG_TYPE_LIRC_MODE2 ProgType = 20 + BPF_PROG_TYPE_SK_REUSEPORT ProgType = 21 + BPF_PROG_TYPE_FLOW_DISSECTOR ProgType = 22 + 
BPF_PROG_TYPE_CGROUP_SYSCTL ProgType = 23 + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE ProgType = 24 + BPF_PROG_TYPE_CGROUP_SOCKOPT ProgType = 25 + BPF_PROG_TYPE_TRACING ProgType = 26 + BPF_PROG_TYPE_STRUCT_OPS ProgType = 27 + BPF_PROG_TYPE_EXT ProgType = 28 + BPF_PROG_TYPE_LSM ProgType = 29 + BPF_PROG_TYPE_SK_LOOKUP ProgType = 30 + BPF_PROG_TYPE_SYSCALL ProgType = 31 +) + +type RetCode int32 + +const ( + BPF_OK RetCode = 0 + BPF_DROP RetCode = 2 + BPF_REDIRECT RetCode = 7 + BPF_LWT_REROUTE RetCode = 128 +) + +type SkAction int32 + +const ( + SK_DROP SkAction = 0 + SK_PASS SkAction = 1 +) + +type StackBuildIdStatus int32 + +const ( + BPF_STACK_BUILD_ID_EMPTY StackBuildIdStatus = 0 + BPF_STACK_BUILD_ID_VALID StackBuildIdStatus = 1 + BPF_STACK_BUILD_ID_IP StackBuildIdStatus = 2 +) + +type StatsType int32 + +const ( + BPF_STATS_RUN_TIME StatsType = 0 +) + +type XdpAction int32 + +const ( + XDP_ABORTED XdpAction = 0 + XDP_DROP XdpAction = 1 + XDP_PASS XdpAction = 2 + XDP_TX XdpAction = 3 + XDP_REDIRECT XdpAction = 4 +) + +type BtfInfo struct { + Btf Pointer + BtfSize uint32 + Id BTFID + Name Pointer + NameLen uint32 + KernelBtf uint32 +} + +type FuncInfo struct { + InsnOff uint32 + TypeId uint32 +} + +type LineInfo struct { + InsnOff uint32 + FileNameOff uint32 + LineOff uint32 + LineCol uint32 +} + +type LinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Extra [16]uint8 +} + +type MapInfo struct { + Type uint32 + Id uint32 + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + MapFlags uint32 + Name ObjName + Ifindex uint32 + BtfVmlinuxValueTypeId uint32 + NetnsDev uint64 + NetnsIno uint64 + BtfId uint32 + BtfKeyTypeId uint32 + BtfValueTypeId uint32 + _ [4]byte + MapExtra uint64 +} + +type ProgInfo struct { + Type uint32 + Id uint32 + Tag [8]uint8 + JitedProgLen uint32 + XlatedProgLen uint32 + JitedProgInsns uint64 + XlatedProgInsns Pointer + LoadTime uint64 + CreatedByUid uint32 + NrMapIds uint32 + MapIds Pointer + Name ObjName + Ifindex uint32 + _ [4]byte /* unsupported bitfield */ + NetnsDev uint64 + NetnsIno uint64 + NrJitedKsyms uint32 + NrJitedFuncLens uint32 + JitedKsyms uint64 + JitedFuncLens uint64 + BtfId uint32 + FuncInfoRecSize uint32 + FuncInfo uint64 + NrFuncInfo uint32 + NrLineInfo uint32 + LineInfo uint64 + JitedLineInfo uint64 + NrJitedLineInfo uint32 + LineInfoRecSize uint32 + JitedLineInfoRecSize uint32 + NrProgTags uint32 + ProgTags uint64 + RunTimeNs uint64 + RunCnt uint64 + RecursionMisses uint64 + VerifiedInsns uint32 + _ [4]byte +} + +type SkLookup struct { + Cookie uint64 + Family uint32 + Protocol uint32 + RemoteIp4 [4]uint8 + RemoteIp6 [16]uint8 + RemotePort uint16 + _ [2]byte + LocalIp4 [4]uint8 + LocalIp6 [16]uint8 + LocalPort uint32 + IngressIfindex uint32 + _ [4]byte +} + +type XdpMd struct { + Data uint32 + DataEnd uint32 + DataMeta uint32 + IngressIfindex uint32 + RxQueueIndex uint32 + EgressIfindex uint32 +} + +type BtfGetFdByIdAttr struct{ Id uint32 } + +func BtfGetFdById(attr *BtfGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_BTF_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type BtfGetNextIdAttr struct { + Id BTFID + NextId BTFID +} + +func BtfGetNextId(attr *BtfGetNextIdAttr) error { + _, err := BPF(BPF_BTF_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type BtfLoadAttr struct { + Btf Pointer + BtfLogBuf Pointer + BtfSize uint32 + BtfLogSize uint32 + BtfLogLevel uint32 + _ [4]byte +} + +func BtfLoad(attr *BtfLoadAttr) 
(*FD, error) { + fd, err := BPF(BPF_BTF_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type EnableStatsAttr struct{ Type uint32 } + +func EnableStats(attr *EnableStatsAttr) (*FD, error) { + fd, err := BPF(BPF_ENABLE_STATS, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type IterCreateAttr struct { + LinkFd uint32 + Flags uint32 +} + +func IterCreate(attr *IterCreateAttr) (*FD, error) { + fd, err := BPF(BPF_ITER_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + TargetBtfId uint32 + _ [28]byte +} + +func LinkCreate(attr *LinkCreateAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateIterAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + IterInfo Pointer + IterInfoLen uint32 + _ [20]byte +} + +func LinkCreateIter(attr *LinkCreateIterAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreatePerfEventAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + BpfCookie uint64 + _ [24]byte +} + +func LinkCreatePerfEvent(attr *LinkCreatePerfEventAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkUpdateAttr struct { + LinkFd uint32 + NewProgFd uint32 + Flags uint32 + OldProgFd uint32 +} + +func LinkUpdate(attr *LinkUpdateAttr) error { + _, err := BPF(BPF_LINK_UPDATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapCreateAttr struct { + MapType MapType + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + MapFlags uint32 + InnerMapFd uint32 + NumaNode uint32 + MapName ObjName + MapIfindex uint32 + BtfFd uint32 + BtfKeyTypeId uint32 + BtfValueTypeId uint32 + BtfVmlinuxValueTypeId uint32 + MapExtra uint64 +} + +func MapCreate(attr *MapCreateAttr) (*FD, error) { + fd, err := BPF(BPF_MAP_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type MapDeleteBatchAttr struct { + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapDeleteBatch(attr *MapDeleteBatchAttr) error { + _, err := BPF(BPF_MAP_DELETE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapDeleteElemAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapDeleteElem(attr *MapDeleteElemAttr) error { + _, err := BPF(BPF_MAP_DELETE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapFreezeAttr struct{ MapFd uint32 } + +func MapFreeze(attr *MapFreezeAttr) error { + _, err := BPF(BPF_MAP_FREEZE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapGetFdByIdAttr struct{ Id uint32 } + +func MapGetFdById(attr *MapGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_MAP_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type 
MapGetNextIdAttr struct { + Id uint32 + NextId uint32 +} + +func MapGetNextId(attr *MapGetNextIdAttr) error { + _, err := BPF(BPF_MAP_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapGetNextKeyAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + NextKey Pointer +} + +func MapGetNextKey(attr *MapGetNextKeyAttr) error { + _, err := BPF(BPF_MAP_GET_NEXT_KEY, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupAndDeleteBatchAttr struct { + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapLookupAndDeleteBatch(attr *MapLookupAndDeleteBatchAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_AND_DELETE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupAndDeleteElemAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapLookupAndDeleteElem(attr *MapLookupAndDeleteElemAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_AND_DELETE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupBatchAttr struct { + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapLookupBatch(attr *MapLookupBatchAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupElemAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapLookupElem(attr *MapLookupElemAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapUpdateBatchAttr struct { + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapUpdateBatch(attr *MapUpdateBatchAttr) error { + _, err := BPF(BPF_MAP_UPDATE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapUpdateElemAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapUpdateElem(attr *MapUpdateElemAttr) error { + _, err := BPF(BPF_MAP_UPDATE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ObjGetAttr struct { + Pathname Pointer + BpfFd uint32 + FileFlags uint32 +} + +func ObjGet(attr *ObjGetAttr) (*FD, error) { + fd, err := BPF(BPF_OBJ_GET, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type ObjGetInfoByFdAttr struct { + BpfFd uint32 + InfoLen uint32 + Info Pointer +} + +func ObjGetInfoByFd(attr *ObjGetInfoByFdAttr) error { + _, err := BPF(BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ObjPinAttr struct { + Pathname Pointer + BpfFd uint32 + FileFlags uint32 +} + +func ObjPin(attr *ObjPinAttr) error { + _, err := BPF(BPF_OBJ_PIN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgAttachAttr struct { + TargetFd uint32 + AttachBpfFd uint32 + AttachType uint32 + AttachFlags uint32 + ReplaceBpfFd uint32 +} + +func ProgAttach(attr *ProgAttachAttr) error { + _, err := BPF(BPF_PROG_ATTACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgBindMapAttr struct { + ProgFd uint32 + MapFd uint32 + Flags uint32 +} + +func ProgBindMap(attr *ProgBindMapAttr) error { + _, err := BPF(BPF_PROG_BIND_MAP, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type 
ProgDetachAttr struct { + TargetFd uint32 + AttachBpfFd uint32 + AttachType uint32 +} + +func ProgDetach(attr *ProgDetachAttr) error { + _, err := BPF(BPF_PROG_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgGetFdByIdAttr struct{ Id uint32 } + +func ProgGetFdById(attr *ProgGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_PROG_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type ProgGetNextIdAttr struct { + Id uint32 + NextId uint32 +} + +func ProgGetNextId(attr *ProgGetNextIdAttr) error { + _, err := BPF(BPF_PROG_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgLoadAttr struct { + ProgType ProgType + InsnCnt uint32 + Insns Pointer + License Pointer + LogLevel uint32 + LogSize uint32 + LogBuf Pointer + KernVersion uint32 + ProgFlags uint32 + ProgName ObjName + ProgIfindex uint32 + ExpectedAttachType AttachType + ProgBtfFd uint32 + FuncInfoRecSize uint32 + FuncInfo Pointer + FuncInfoCnt uint32 + LineInfoRecSize uint32 + LineInfo Pointer + LineInfoCnt uint32 + AttachBtfId uint32 + AttachProgFd uint32 + CoreReloCnt uint32 + FdArray Pointer + CoreRelos Pointer + CoreReloRecSize uint32 + _ [4]byte +} + +func ProgLoad(attr *ProgLoadAttr) (*FD, error) { + fd, err := BPF(BPF_PROG_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type ProgRunAttr struct { + ProgFd uint32 + Retval uint32 + DataSizeIn uint32 + DataSizeOut uint32 + DataIn Pointer + DataOut Pointer + Repeat uint32 + Duration uint32 + CtxSizeIn uint32 + CtxSizeOut uint32 + CtxIn Pointer + CtxOut Pointer + Flags uint32 + Cpu uint32 + BatchSize uint32 + _ [4]byte +} + +func ProgRun(attr *ProgRunAttr) error { + _, err := BPF(BPF_PROG_TEST_RUN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type RawTracepointOpenAttr struct { + Name Pointer + ProgFd uint32 + _ [4]byte +} + +func RawTracepointOpen(attr *RawTracepointOpenAttr) (*FD, error) { + fd, err := BPF(BPF_RAW_TRACEPOINT_OPEN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type CgroupLinkInfo struct { + CgroupId uint64 + AttachType AttachType + _ [4]byte +} + +type IterLinkInfo struct { + TargetName Pointer + TargetNameLen uint32 +} + +type NetNsLinkInfo struct { + NetnsIno uint32 + AttachType AttachType +} + +type RawTracepointLinkInfo struct { + TpName Pointer + TpNameLen uint32 + _ [4]byte +} + +type TracingLinkInfo struct { + AttachType AttachType + TargetObjId uint32 + TargetBtfId uint32 +} + +type XDPLinkInfo struct{ Ifindex uint32 } diff --git a/agent/vendor/github.com/cilium/ebpf/internal/syscall.go b/agent/vendor/github.com/cilium/ebpf/internal/syscall.go deleted file mode 100644 index b766e643e07..00000000000 --- a/agent/vendor/github.com/cilium/ebpf/internal/syscall.go +++ /dev/null @@ -1,245 +0,0 @@ -package internal - -import ( - "fmt" - "path/filepath" - "runtime" - "syscall" - "unsafe" - - "github.com/cilium/ebpf/internal/unix" -) - -//go:generate stringer -output syscall_string.go -type=BPFCmd - -// BPFCmd identifies a subcommand of the bpf syscall. -type BPFCmd int - -// Well known BPF commands. 
-const ( - BPF_MAP_CREATE BPFCmd = iota - BPF_MAP_LOOKUP_ELEM - BPF_MAP_UPDATE_ELEM - BPF_MAP_DELETE_ELEM - BPF_MAP_GET_NEXT_KEY - BPF_PROG_LOAD - BPF_OBJ_PIN - BPF_OBJ_GET - BPF_PROG_ATTACH - BPF_PROG_DETACH - BPF_PROG_TEST_RUN - BPF_PROG_GET_NEXT_ID - BPF_MAP_GET_NEXT_ID - BPF_PROG_GET_FD_BY_ID - BPF_MAP_GET_FD_BY_ID - BPF_OBJ_GET_INFO_BY_FD - BPF_PROG_QUERY - BPF_RAW_TRACEPOINT_OPEN - BPF_BTF_LOAD - BPF_BTF_GET_FD_BY_ID - BPF_TASK_FD_QUERY - BPF_MAP_LOOKUP_AND_DELETE_ELEM - BPF_MAP_FREEZE - BPF_BTF_GET_NEXT_ID - BPF_MAP_LOOKUP_BATCH - BPF_MAP_LOOKUP_AND_DELETE_BATCH - BPF_MAP_UPDATE_BATCH - BPF_MAP_DELETE_BATCH - BPF_LINK_CREATE - BPF_LINK_UPDATE - BPF_LINK_GET_FD_BY_ID - BPF_LINK_GET_NEXT_ID - BPF_ENABLE_STATS - BPF_ITER_CREATE -) - -// BPF wraps SYS_BPF. -// -// Any pointers contained in attr must use the Pointer type from this package. -func BPF(cmd BPFCmd, attr unsafe.Pointer, size uintptr) (uintptr, error) { - r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size) - runtime.KeepAlive(attr) - - var err error - if errNo != 0 { - err = wrappedErrno{errNo} - } - - return r1, err -} - -type BPFProgAttachAttr struct { - TargetFd uint32 - AttachBpfFd uint32 - AttachType uint32 - AttachFlags uint32 - ReplaceBpfFd uint32 -} - -func BPFProgAttach(attr *BPFProgAttachAttr) error { - _, err := BPF(BPF_PROG_ATTACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - return err -} - -type BPFProgDetachAttr struct { - TargetFd uint32 - AttachBpfFd uint32 - AttachType uint32 -} - -func BPFProgDetach(attr *BPFProgDetachAttr) error { - _, err := BPF(BPF_PROG_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - return err -} - -type BPFEnableStatsAttr struct { - StatsType uint32 -} - -func BPFEnableStats(attr *BPFEnableStatsAttr) (*FD, error) { - ptr, err := BPF(BPF_ENABLE_STATS, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err != nil { - return nil, fmt.Errorf("enable stats: %w", err) - } - return NewFD(uint32(ptr)), nil - -} - -type bpfObjAttr struct { - fileName Pointer - fd uint32 - fileFlags uint32 -} - -const bpfFSType = 0xcafe4a11 - -// BPFObjPin wraps BPF_OBJ_PIN. -func BPFObjPin(fileName string, fd *FD) error { - dirName := filepath.Dir(fileName) - var statfs unix.Statfs_t - if err := unix.Statfs(dirName, &statfs); err != nil { - return err - } - if uint64(statfs.Type) != bpfFSType { - return fmt.Errorf("%s is not on a bpf filesystem", fileName) - } - - value, err := fd.Value() - if err != nil { - return err - } - - attr := bpfObjAttr{ - fileName: NewStringPointer(fileName), - fd: value, - } - _, err = BPF(BPF_OBJ_PIN, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - if err != nil { - return fmt.Errorf("pin object %s: %w", fileName, err) - } - return nil -} - -// BPFObjGet wraps BPF_OBJ_GET. -func BPFObjGet(fileName string, flags uint32) (*FD, error) { - attr := bpfObjAttr{ - fileName: NewStringPointer(fileName), - fileFlags: flags, - } - ptr, err := BPF(BPF_OBJ_GET, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - if err != nil { - return nil, fmt.Errorf("get object %s: %w", fileName, err) - } - return NewFD(uint32(ptr)), nil -} - -type bpfObjGetInfoByFDAttr struct { - fd uint32 - infoLen uint32 - info Pointer -} - -// BPFObjGetInfoByFD wraps BPF_OBJ_GET_INFO_BY_FD. -// -// Available from 4.13. 
-func BPFObjGetInfoByFD(fd *FD, info unsafe.Pointer, size uintptr) error { - value, err := fd.Value() - if err != nil { - return err - } - - attr := bpfObjGetInfoByFDAttr{ - fd: value, - infoLen: uint32(size), - info: NewPointer(info), - } - _, err = BPF(BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - if err != nil { - return fmt.Errorf("fd %v: %w", fd, err) - } - return nil -} - -// BPFObjName is a null-terminated string made up of -// 'A-Za-z0-9_' characters. -type BPFObjName [unix.BPF_OBJ_NAME_LEN]byte - -// NewBPFObjName truncates the result if it is too long. -func NewBPFObjName(name string) BPFObjName { - var result BPFObjName - copy(result[:unix.BPF_OBJ_NAME_LEN-1], name) - return result -} - -type BPFMapCreateAttr struct { - MapType uint32 - KeySize uint32 - ValueSize uint32 - MaxEntries uint32 - Flags uint32 - InnerMapFd uint32 // since 4.12 56f668dfe00d - NumaNode uint32 // since 4.14 96eabe7a40aa - MapName BPFObjName // since 4.15 ad5b177bd73f - MapIfIndex uint32 - BTFFd uint32 - BTFKeyTypeID uint32 - BTFValueTypeID uint32 -} - -func BPFMapCreate(attr *BPFMapCreateAttr) (*FD, error) { - fd, err := BPF(BPF_MAP_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err != nil { - return nil, err - } - - return NewFD(uint32(fd)), nil -} - -// wrappedErrno wraps syscall.Errno to prevent direct comparisons with -// syscall.E* or unix.E* constants. -// -// You should never export an error of this type. -type wrappedErrno struct { - syscall.Errno -} - -func (we wrappedErrno) Unwrap() error { - return we.Errno -} - -type syscallError struct { - error - errno syscall.Errno -} - -func SyscallError(err error, errno syscall.Errno) error { - return &syscallError{err, errno} -} - -func (se *syscallError) Is(target error) bool { - return target == se.error -} - -func (se *syscallError) Unwrap() error { - return se.errno -} diff --git a/agent/vendor/github.com/cilium/ebpf/internal/syscall_string.go b/agent/vendor/github.com/cilium/ebpf/internal/syscall_string.go deleted file mode 100644 index 85df0477973..00000000000 --- a/agent/vendor/github.com/cilium/ebpf/internal/syscall_string.go +++ /dev/null @@ -1,56 +0,0 @@ -// Code generated by "stringer -output syscall_string.go -type=BPFCmd"; DO NOT EDIT. - -package internal - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[BPF_MAP_CREATE-0] - _ = x[BPF_MAP_LOOKUP_ELEM-1] - _ = x[BPF_MAP_UPDATE_ELEM-2] - _ = x[BPF_MAP_DELETE_ELEM-3] - _ = x[BPF_MAP_GET_NEXT_KEY-4] - _ = x[BPF_PROG_LOAD-5] - _ = x[BPF_OBJ_PIN-6] - _ = x[BPF_OBJ_GET-7] - _ = x[BPF_PROG_ATTACH-8] - _ = x[BPF_PROG_DETACH-9] - _ = x[BPF_PROG_TEST_RUN-10] - _ = x[BPF_PROG_GET_NEXT_ID-11] - _ = x[BPF_MAP_GET_NEXT_ID-12] - _ = x[BPF_PROG_GET_FD_BY_ID-13] - _ = x[BPF_MAP_GET_FD_BY_ID-14] - _ = x[BPF_OBJ_GET_INFO_BY_FD-15] - _ = x[BPF_PROG_QUERY-16] - _ = x[BPF_RAW_TRACEPOINT_OPEN-17] - _ = x[BPF_BTF_LOAD-18] - _ = x[BPF_BTF_GET_FD_BY_ID-19] - _ = x[BPF_TASK_FD_QUERY-20] - _ = x[BPF_MAP_LOOKUP_AND_DELETE_ELEM-21] - _ = x[BPF_MAP_FREEZE-22] - _ = x[BPF_BTF_GET_NEXT_ID-23] - _ = x[BPF_MAP_LOOKUP_BATCH-24] - _ = x[BPF_MAP_LOOKUP_AND_DELETE_BATCH-25] - _ = x[BPF_MAP_UPDATE_BATCH-26] - _ = x[BPF_MAP_DELETE_BATCH-27] - _ = x[BPF_LINK_CREATE-28] - _ = x[BPF_LINK_UPDATE-29] - _ = x[BPF_LINK_GET_FD_BY_ID-30] - _ = x[BPF_LINK_GET_NEXT_ID-31] - _ = x[BPF_ENABLE_STATS-32] - _ = x[BPF_ITER_CREATE-33] -} - -const _BPFCmd_name = "BPF_MAP_CREATEBPF_MAP_LOOKUP_ELEMBPF_MAP_UPDATE_ELEMBPF_MAP_DELETE_ELEMBPF_MAP_GET_NEXT_KEYBPF_PROG_LOADBPF_OBJ_PINBPF_OBJ_GETBPF_PROG_ATTACHBPF_PROG_DETACHBPF_PROG_TEST_RUNBPF_PROG_GET_NEXT_IDBPF_MAP_GET_NEXT_IDBPF_PROG_GET_FD_BY_IDBPF_MAP_GET_FD_BY_IDBPF_OBJ_GET_INFO_BY_FDBPF_PROG_QUERYBPF_RAW_TRACEPOINT_OPENBPF_BTF_LOADBPF_BTF_GET_FD_BY_IDBPF_TASK_FD_QUERYBPF_MAP_LOOKUP_AND_DELETE_ELEMBPF_MAP_FREEZEBPF_BTF_GET_NEXT_IDBPF_MAP_LOOKUP_BATCHBPF_MAP_LOOKUP_AND_DELETE_BATCHBPF_MAP_UPDATE_BATCHBPF_MAP_DELETE_BATCHBPF_LINK_CREATEBPF_LINK_UPDATEBPF_LINK_GET_FD_BY_IDBPF_LINK_GET_NEXT_IDBPF_ENABLE_STATSBPF_ITER_CREATE" - -var _BPFCmd_index = [...]uint16{0, 14, 33, 52, 71, 91, 104, 115, 126, 141, 156, 173, 193, 212, 233, 253, 275, 289, 312, 324, 344, 361, 391, 405, 424, 444, 475, 495, 515, 530, 545, 566, 586, 602, 617} - -func (i BPFCmd) String() string { - if i < 0 || i >= BPFCmd(len(_BPFCmd_index)-1) { - return "BPFCmd(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _BPFCmd_name[_BPFCmd_index[i]:_BPFCmd_index[i+1]] -} diff --git a/agent/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go b/agent/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go index 0a18eaf0cf8..db4a1f5bf9e 100644 --- a/agent/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go +++ b/agent/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go @@ -1,9 +1,9 @@ +//go:build linux // +build linux package unix import ( - "bytes" "syscall" linux "golang.org/x/sys/unix" @@ -20,10 +20,13 @@ const ( EPERM = linux.EPERM ESRCH = linux.ESRCH ENODEV = linux.ENODEV + EBADF = linux.EBADF + E2BIG = linux.E2BIG + EFAULT = linux.EFAULT + EACCES = linux.EACCES // ENOTSUPP is not the same as ENOTSUP or EOPNOTSUP ENOTSUPP = syscall.Errno(0x20c) - EBADF = linux.EBADF BPF_F_NO_PREALLOC = linux.BPF_F_NO_PREALLOC BPF_F_NUMA_NODE = linux.BPF_F_NUMA_NODE BPF_F_RDONLY = linux.BPF_F_RDONLY @@ -35,6 +38,9 @@ const ( BPF_F_INNER_MAP = linux.BPF_F_INNER_MAP BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN BPF_TAG_SIZE = linux.BPF_TAG_SIZE + BPF_RINGBUF_BUSY_BIT = linux.BPF_RINGBUF_BUSY_BIT + BPF_RINGBUF_DISCARD_BIT = linux.BPF_RINGBUF_DISCARD_BIT + BPF_RINGBUF_HDR_SZ = linux.BPF_RINGBUF_HDR_SZ SYS_BPF = linux.SYS_BPF F_DUPFD_CLOEXEC = linux.F_DUPFD_CLOEXEC EPOLL_CTL_ADD = linux.EPOLL_CTL_ADD @@ -61,19 +67,19 @@ const ( PERF_RECORD_SAMPLE = linux.PERF_RECORD_SAMPLE AT_FDCWD = linux.AT_FDCWD RENAME_NOREPLACE = linux.RENAME_NOREPLACE + SO_ATTACH_BPF = 
linux.SO_ATTACH_BPF + SO_DETACH_BPF = linux.SO_DETACH_BPF + SOL_SOCKET = linux.SOL_SOCKET ) // Statfs_t is a wrapper type Statfs_t = linux.Statfs_t +type Stat_t = linux.Stat_t + // Rlimit is a wrapper type Rlimit = linux.Rlimit -// Setrlimit is a wrapper -func Setrlimit(resource int, rlim *Rlimit) (err error) { - return linux.Setrlimit(resource, rlim) -} - // Syscall is a wrapper func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { return linux.Syscall(trap, a1, a2, a3) @@ -191,14 +197,14 @@ func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags return linux.Renameat2(olddirfd, oldpath, newdirfd, newpath, flags) } -func KernelRelease() (string, error) { - var uname Utsname - err := Uname(&uname) - if err != nil { - return "", err - } +func Prlimit(pid, resource int, new, old *Rlimit) error { + return linux.Prlimit(pid, resource, new, old) +} + +func Open(path string, mode int, perm uint32) (int, error) { + return linux.Open(path, mode, perm) +} - end := bytes.IndexByte(uname.Release[:], 0) - release := string(uname.Release[:end]) - return release, nil +func Fstat(fd int, stat *Stat_t) error { + return linux.Fstat(fd, stat) } diff --git a/agent/vendor/github.com/cilium/ebpf/internal/unix/types_other.go b/agent/vendor/github.com/cilium/ebpf/internal/unix/types_other.go index 1b06defc0a0..133c267dbc3 100644 --- a/agent/vendor/github.com/cilium/ebpf/internal/unix/types_other.go +++ b/agent/vendor/github.com/cilium/ebpf/internal/unix/types_other.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package unix @@ -21,6 +22,9 @@ const ( ESRCH = syscall.ESRCH ENODEV = syscall.ENODEV EBADF = syscall.Errno(0) + E2BIG = syscall.Errno(0) + EFAULT = syscall.EFAULT + EACCES = syscall.Errno(0) // ENOTSUPP is not the same as ENOTSUP or EOPNOTSUP ENOTSUPP = syscall.Errno(0x20c) @@ -35,6 +39,9 @@ const ( BPF_F_INNER_MAP = 0 BPF_OBJ_NAME_LEN = 0x10 BPF_TAG_SIZE = 0x8 + BPF_RINGBUF_BUSY_BIT = 0 + BPF_RINGBUF_DISCARD_BIT = 0 + BPF_RINGBUF_HDR_SZ = 0 SYS_BPF = 321 F_DUPFD_CLOEXEC = 0x406 EPOLLIN = 0x1 @@ -62,6 +69,9 @@ const ( PERF_RECORD_SAMPLE = 9 AT_FDCWD = -0x2 RENAME_NOREPLACE = 0x1 + SO_ATTACH_BPF = 0x32 + SO_DETACH_BPF = 0x1b + SOL_SOCKET = 0x1 ) // Statfs_t is a wrapper @@ -80,17 +90,14 @@ type Statfs_t struct { Spare [4]int64 } +type Stat_t struct{} + // Rlimit is a wrapper type Rlimit struct { Cur uint64 Max uint64 } -// Setrlimit is a wrapper -func Setrlimit(resource int, rlim *Rlimit) (err error) { - return errNonLinux -} - // Syscall is a wrapper func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { return 0, 0, syscall.Errno(1) @@ -258,6 +265,14 @@ func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags return errNonLinux } -func KernelRelease() (string, error) { - return "", errNonLinux +func Prlimit(pid, resource int, new, old *Rlimit) error { + return errNonLinux +} + +func Open(path string, mode int, perm uint32) (int, error) { + return -1, errNonLinux +} + +func Fstat(fd int, stat *Stat_t) error { + return errNonLinux } diff --git a/agent/vendor/github.com/cilium/ebpf/internal/vdso.go b/agent/vendor/github.com/cilium/ebpf/internal/vdso.go new file mode 100644 index 00000000000..ae4821de20c --- /dev/null +++ b/agent/vendor/github.com/cilium/ebpf/internal/vdso.go @@ -0,0 +1,150 @@ +package internal + +import ( + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "os" + + "github.com/cilium/ebpf/internal/unix" +) + +var ( + errAuxvNoVDSO = errors.New("no vdso address found in 
auxv") +) + +// vdsoVersion returns the LINUX_VERSION_CODE embedded in the vDSO library +// linked into the current process image. +func vdsoVersion() (uint32, error) { + // Read data from the auxiliary vector, which is normally passed directly + // to the process. Go does not expose that data, so we must read it from procfs. + // https://man7.org/linux/man-pages/man3/getauxval.3.html + av, err := os.Open("/proc/self/auxv") + if err != nil { + return 0, fmt.Errorf("opening auxv: %w", err) + } + defer av.Close() + + vdsoAddr, err := vdsoMemoryAddress(av) + if err != nil { + return 0, fmt.Errorf("finding vDSO memory address: %w", err) + } + + // Use /proc/self/mem rather than unsafe.Pointer tricks. + mem, err := os.Open("/proc/self/mem") + if err != nil { + return 0, fmt.Errorf("opening mem: %w", err) + } + defer mem.Close() + + // Open ELF at provided memory address, as offset into /proc/self/mem. + c, err := vdsoLinuxVersionCode(io.NewSectionReader(mem, int64(vdsoAddr), math.MaxInt64)) + if err != nil { + return 0, fmt.Errorf("reading linux version code: %w", err) + } + + return c, nil +} + +// vdsoMemoryAddress returns the memory address of the vDSO library +// linked into the current process image. r is an io.Reader into an auxv blob. +func vdsoMemoryAddress(r io.Reader) (uint64, error) { + const ( + _AT_NULL = 0 // End of vector + _AT_SYSINFO_EHDR = 33 // Offset to vDSO blob in process image + ) + + // Loop through all tag/value pairs in auxv until we find `AT_SYSINFO_EHDR`, + // the address of a page containing the virtual Dynamic Shared Object (vDSO). + aux := struct{ Tag, Val uint64 }{} + for { + if err := binary.Read(r, NativeEndian, &aux); err != nil { + return 0, fmt.Errorf("reading auxv entry: %w", err) + } + + switch aux.Tag { + case _AT_SYSINFO_EHDR: + if aux.Val != 0 { + return aux.Val, nil + } + return 0, fmt.Errorf("invalid vDSO address in auxv") + // _AT_NULL is always the last tag/val pair in the aux vector + // and can be treated like EOF. + case _AT_NULL: + return 0, errAuxvNoVDSO + } + } +} + +// format described at https://www.man7.org/linux/man-pages/man5/elf.5.html in section 'Notes (Nhdr)' +type elfNoteHeader struct { + NameSize int32 + DescSize int32 + Type int32 +} + +// vdsoLinuxVersionCode returns the LINUX_VERSION_CODE embedded in +// the ELF notes section of the binary provided by the reader. +func vdsoLinuxVersionCode(r io.ReaderAt) (uint32, error) { + hdr, err := NewSafeELFFile(r) + if err != nil { + return 0, fmt.Errorf("reading vDSO ELF: %w", err) + } + + sections := hdr.SectionsByType(elf.SHT_NOTE) + if len(sections) == 0 { + return 0, fmt.Errorf("no note section found in vDSO ELF") + } + + for _, sec := range sections { + sr := sec.Open() + var n elfNoteHeader + + // Read notes until we find one named 'Linux'. + for { + if err := binary.Read(sr, hdr.ByteOrder, &n); err != nil { + if errors.Is(err, io.EOF) { + // We looked at all the notes in this section + break + } + return 0, fmt.Errorf("reading note header: %w", err) + } + + // If a note name is defined, it follows the note header. + var name string + if n.NameSize > 0 { + // Read the note name, aligned to 4 bytes. + buf := make([]byte, Align(int(n.NameSize), 4)) + if err := binary.Read(sr, hdr.ByteOrder, &buf); err != nil { + return 0, fmt.Errorf("reading note name: %w", err) + } + + // Read nul-terminated string. + name = unix.ByteSliceToString(buf[:n.NameSize]) + } + + // If a note descriptor is defined, it follows the name. + // It is possible for a note to have a descriptor but not a name. 
+ if n.DescSize > 0 { + // LINUX_VERSION_CODE is a uint32 value. + if name == "Linux" && n.DescSize == 4 && n.Type == 0 { + var version uint32 + if err := binary.Read(sr, hdr.ByteOrder, &version); err != nil { + return 0, fmt.Errorf("reading note descriptor: %w", err) + } + return version, nil + } + + // Discard the note descriptor if it exists but we're not interested in it. + if _, err := io.CopyN(io.Discard, sr, int64(Align(int(n.DescSize), 4))); err != nil { + return 0, err + } + } + } + } + + return 0, fmt.Errorf("no Linux note in ELF") +} diff --git a/agent/vendor/github.com/cilium/ebpf/internal/version.go b/agent/vendor/github.com/cilium/ebpf/internal/version.go index 1a678bfe657..370e01e4447 100644 --- a/agent/vendor/github.com/cilium/ebpf/internal/version.go +++ b/agent/vendor/github.com/cilium/ebpf/internal/version.go @@ -2,8 +2,6 @@ package internal import ( "fmt" - "io/ioutil" - "regexp" "sync" "github.com/cilium/ebpf/internal/unix" @@ -18,12 +16,6 @@ const ( ) var ( - // Match between one and three decimals separated by dots, with the last - // segment (patch level) being optional on some kernels. - // The x.y.z string must appear at the start of a string or right after - // whitespace to prevent sequences like 'x.y.z-a.b.c' from matching 'a.b.c'. - rgxKernelVersion = regexp.MustCompile(`(?:\A|\s)\d{1,3}\.\d{1,3}(?:\.\d{1,3})?`) - kernelVersion = struct { once sync.Once version Version @@ -46,6 +38,15 @@ func NewVersion(ver string) (Version, error) { return Version{major, minor, patch}, nil } +// NewVersionFromCode creates a version from a LINUX_VERSION_CODE. +func NewVersionFromCode(code uint32) Version { + return Version{ + uint16(uint8(code >> 16)), + uint16(uint8(code >> 8)), + uint16(uint8(code)), + } +} + func (v Version) String() string { if v[2] == 0 { return fmt.Sprintf("v%d.%d", v[0], v[1]) @@ -98,66 +99,24 @@ func KernelVersion() (Version, error) { return kernelVersion.version, nil } -// detectKernelVersion returns the version of the running kernel. It scans the -// following sources in order: /proc/version_signature, uname -v, uname -r. -// In each of those locations, the last-appearing x.y(.z) value is selected -// for parsing. The first location that yields a usable version number is -// returned. +// detectKernelVersion returns the version of the running kernel. func detectKernelVersion() (Version, error) { - - // Try reading /proc/version_signature for Ubuntu compatibility. - // Example format: Ubuntu 4.15.0-91.92-generic 4.15.18 - // This method exists in the kernel itself, see d18acd15c - // ("perf tools: Fix kernel version error in ubuntu"). - if pvs, err := ioutil.ReadFile("/proc/version_signature"); err == nil { - // If /proc/version_signature exists, failing to parse it is an error. - // It only exists on Ubuntu, where the real patch level is not obtainable - // through any other method. - v, err := findKernelVersion(string(pvs)) - if err != nil { - return Version{}, err - } - return v, nil - } - - var uname unix.Utsname - if err := unix.Uname(&uname); err != nil { - return Version{}, fmt.Errorf("calling uname: %w", err) - } - - // Debian puts the version including the patch level in uname.Version. - // It is not an error if there's no version number in uname.Version, - // as most distributions don't use it. Parsing can continue on uname.Release. 
- // Example format: #1 SMP Debian 4.19.37-5+deb10u2 (2019-08-08) - if v, err := findKernelVersion(unix.ByteSliceToString(uname.Version[:])); err == nil { - return v, nil - } - - // Most other distributions have the full kernel version including patch - // level in uname.Release. - // Example format: 4.19.0-5-amd64, 5.5.10-arch1-1 - v, err := findKernelVersion(unix.ByteSliceToString(uname.Release[:])) + vc, err := vdsoVersion() if err != nil { return Version{}, err } - - return v, nil + return NewVersionFromCode(vc), nil } -// findKernelVersion matches s against rgxKernelVersion and parses the result -// into a Version. If s contains multiple matches, the last entry is selected. -func findKernelVersion(s string) (Version, error) { - m := rgxKernelVersion.FindAllString(s, -1) - if m == nil { - return Version{}, fmt.Errorf("no kernel version in string: %s", s) - } - // Pick the last match of the string in case there are multiple. - s = m[len(m)-1] - - v, err := NewVersion(s) - if err != nil { - return Version{}, fmt.Errorf("parsing version string %s: %w", s, err) +// KernelRelease returns the release string of the running kernel. +// Its format depends on the Linux distribution and corresponds to directory +// names in /lib/modules by convention. Some examples are 5.15.17-1-lts and +// 4.19.0-16-amd64. +func KernelRelease() (string, error) { + var uname unix.Utsname + if err := unix.Uname(&uname); err != nil { + return "", fmt.Errorf("uname failed: %w", err) } - return v, nil + return unix.ByteSliceToString(uname.Release[:]), nil } diff --git a/agent/vendor/github.com/cilium/ebpf/link/cgroup.go b/agent/vendor/github.com/cilium/ebpf/link/cgroup.go index 5540bb068cd..003b0638e89 100644 --- a/agent/vendor/github.com/cilium/ebpf/link/cgroup.go +++ b/agent/vendor/github.com/cilium/ebpf/link/cgroup.go @@ -56,16 +56,6 @@ func AttachCgroup(opts CgroupOptions) (Link, error) { return cg, nil } -// LoadPinnedCgroup loads a pinned cgroup from a bpffs. 
-func LoadPinnedCgroup(fileName string, opts *ebpf.LoadPinOptions) (Link, error) { - link, err := LoadPinnedRawLink(fileName, CgroupType, opts) - if err != nil { - return nil, err - } - - return &linkCgroup{*link}, nil -} - type progAttachCgroup struct { cgroup *os.File current *ebpf.Program @@ -151,6 +141,10 @@ func (cg *progAttachCgroup) Unpin() error { return fmt.Errorf("can't pin cgroup: %w", ErrNotSupported) } +func (cg *progAttachCgroup) Info() (*Info, error) { + return nil, fmt.Errorf("can't get cgroup info: %w", ErrNotSupported) +} + type linkCgroup struct { RawLink } diff --git a/agent/vendor/github.com/cilium/ebpf/link/iter.go b/agent/vendor/github.com/cilium/ebpf/link/iter.go index 654d34ef848..d2b32ef331c 100644 --- a/agent/vendor/github.com/cilium/ebpf/link/iter.go +++ b/agent/vendor/github.com/cilium/ebpf/link/iter.go @@ -6,7 +6,7 @@ import ( "unsafe" "github.com/cilium/ebpf" - "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" ) type IterOptions struct { @@ -31,26 +31,26 @@ func AttachIter(opts IterOptions) (*Iter, error) { progFd := opts.Program.FD() if progFd < 0 { - return nil, fmt.Errorf("invalid program: %s", internal.ErrClosedFd) + return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd) } var info bpfIterLinkInfoMap if opts.Map != nil { mapFd := opts.Map.FD() if mapFd < 0 { - return nil, fmt.Errorf("invalid map: %w", internal.ErrClosedFd) + return nil, fmt.Errorf("invalid map: %w", sys.ErrClosedFd) } info.map_fd = uint32(mapFd) } - attr := bpfLinkCreateIterAttr{ - prog_fd: uint32(progFd), - attach_type: ebpf.AttachTraceIter, - iter_info: internal.NewPointer(unsafe.Pointer(&info)), - iter_info_len: uint32(unsafe.Sizeof(info)), + attr := sys.LinkCreateIterAttr{ + ProgFd: uint32(progFd), + AttachType: sys.AttachType(ebpf.AttachTraceIter), + IterInfo: sys.NewPointer(unsafe.Pointer(&info)), + IterInfoLen: uint32(unsafe.Sizeof(info)), } - fd, err := bpfLinkCreateIter(&attr) + fd, err := sys.LinkCreateIter(&attr) if err != nil { return nil, fmt.Errorf("can't link iterator: %w", err) } @@ -58,16 +58,6 @@ func AttachIter(opts IterOptions) (*Iter, error) { return &Iter{RawLink{fd, ""}}, err } -// LoadPinnedIter loads a pinned iterator from a bpffs. -func LoadPinnedIter(fileName string, opts *ebpf.LoadPinOptions) (*Iter, error) { - link, err := LoadPinnedRawLink(fileName, IterType, opts) - if err != nil { - return nil, err - } - - return &Iter{*link}, err -} - // Iter represents an attached bpf_iter. type Iter struct { RawLink @@ -77,16 +67,11 @@ type Iter struct { // // Reading from the returned reader triggers the BPF program. 
func (it *Iter) Open() (io.ReadCloser, error) { - linkFd, err := it.fd.Value() - if err != nil { - return nil, err - } - - attr := &bpfIterCreateAttr{ - linkFd: linkFd, + attr := &sys.IterCreateAttr{ + LinkFd: it.fd.Uint(), } - fd, err := bpfIterCreate(attr) + fd, err := sys.IterCreate(attr) if err != nil { return nil, fmt.Errorf("can't create iterator: %w", err) } diff --git a/agent/vendor/github.com/cilium/ebpf/link/kprobe.go b/agent/vendor/github.com/cilium/ebpf/link/kprobe.go index ea71d6d608a..fdf622a0c07 100644 --- a/agent/vendor/github.com/cilium/ebpf/link/kprobe.go +++ b/agent/vendor/github.com/cilium/ebpf/link/kprobe.go @@ -5,15 +5,16 @@ import ( "crypto/rand" "errors" "fmt" - "io/ioutil" "os" "path/filepath" "runtime" + "strings" "sync" + "syscall" "unsafe" "github.com/cilium/ebpf" - "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) @@ -29,6 +30,27 @@ var ( type probeType uint8 +type probeArgs struct { + symbol, group, path string + offset, refCtrOffset, cookie uint64 + pid int + ret bool +} + +// KprobeOptions defines additional parameters that will be used +// when loading Kprobes. +type KprobeOptions struct { + // Arbitrary value that can be fetched from an eBPF program + // via `bpf_get_attach_cookie()`. + // + // Needs kernel 5.15+. + Cookie uint64 + // Offset of the kprobe relative to the traced symbol. + // Can be used to insert kprobes at arbitrary offsets in kernel functions, + // e.g. in places where functions have been inlined. + Offset uint64 +} + const ( kprobeType probeType = iota uprobeType @@ -72,68 +94,109 @@ func (pt probeType) RetprobeBit() (uint64, error) { // given kernel symbol starts executing. See /proc/kallsyms for available // symbols. For example, printk(): // -// Kprobe("printk", prog) +// kp, err := Kprobe("printk", prog, nil) // -// The resulting Link must be Closed during program shutdown to avoid leaking -// system resources. -func Kprobe(symbol string, prog *ebpf.Program) (Link, error) { - k, err := kprobe(symbol, prog, false) +// Losing the reference to the resulting Link (kp) will close the Kprobe +// and prevent further execution of prog. The Link must be Closed during +// program shutdown to avoid leaking system resources. +func Kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) { + k, err := kprobe(symbol, prog, opts, false) if err != nil { return nil, err } - err = k.attach(prog) + lnk, err := attachPerfEvent(k, prog) if err != nil { k.Close() return nil, err } - return k, nil + return lnk, nil } // Kretprobe attaches the given eBPF program to a perf event that fires right // before the given kernel symbol exits, with the function stack left intact. // See /proc/kallsyms for available symbols. For example, printk(): // -// Kretprobe("printk", prog) +// kp, err := Kretprobe("printk", prog, nil) // -// The resulting Link must be Closed during program shutdown to avoid leaking -// system resources. -func Kretprobe(symbol string, prog *ebpf.Program) (Link, error) { - k, err := kprobe(symbol, prog, true) +// Losing the reference to the resulting Link (kp) will close the Kretprobe +// and prevent further execution of prog. The Link must be Closed during +// program shutdown to avoid leaking system resources. 
+func Kretprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) { + k, err := kprobe(symbol, prog, opts, true) if err != nil { return nil, err } - err = k.attach(prog) + lnk, err := attachPerfEvent(k, prog) if err != nil { k.Close() return nil, err } - return k, nil + return lnk, nil +} + +// isValidKprobeSymbol implements the equivalent of a regex match +// against "^[a-zA-Z_][0-9a-zA-Z_.]*$". +func isValidKprobeSymbol(s string) bool { + if len(s) < 1 { + return false + } + + for i, c := range []byte(s) { + switch { + case c >= 'a' && c <= 'z': + case c >= 'A' && c <= 'Z': + case c == '_': + case i > 0 && c >= '0' && c <= '9': + + // Allow `.` in symbol name. GCC-compiled kernel may change symbol name + // to have a `.isra.$n` suffix, like `udp_send_skb.isra.52`. + // See: https://gcc.gnu.org/gcc-10/changes.html + case i > 0 && c == '.': + + default: + return false + } + } + + return true } // kprobe opens a perf event on the given symbol and attaches prog to it. // If ret is true, create a kretprobe. -func kprobe(symbol string, prog *ebpf.Program, ret bool) (*perfEvent, error) { +func kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions, ret bool) (*perfEvent, error) { if symbol == "" { return nil, fmt.Errorf("symbol name cannot be empty: %w", errInvalidInput) } if prog == nil { return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) } - if !rgxTraceEvent.MatchString(symbol) { - return nil, fmt.Errorf("symbol '%s' must be alphanumeric or underscore: %w", symbol, errInvalidInput) + if !isValidKprobeSymbol(symbol) { + return nil, fmt.Errorf("symbol '%s' must be a valid symbol in /proc/kallsyms: %w", symbol, errInvalidInput) } if prog.Type() != ebpf.Kprobe { return nil, fmt.Errorf("eBPF program type %s is not a Kprobe: %w", prog.Type(), errInvalidInput) } + args := probeArgs{ + pid: perfAllThreads, + symbol: symbol, + ret: ret, + } + + if opts != nil { + args.cookie = opts.Cookie + args.offset = opts.Offset + } + // Use kprobe PMU if the kernel has it available. - tp, err := pmuKprobe(platformPrefix(symbol), ret) + tp, err := pmuKprobe(args) if errors.Is(err, os.ErrNotExist) { - tp, err = pmuKprobe(symbol, ret) + args.symbol = platformPrefix(symbol) + tp, err = pmuKprobe(args) } if err == nil { return tp, nil @@ -143,9 +206,11 @@ func kprobe(symbol string, prog *ebpf.Program, ret bool) (*perfEvent, error) { } // Use tracefs if kprobe PMU is missing. - tp, err = tracefsKprobe(platformPrefix(symbol), ret) + args.symbol = symbol + tp, err = tracefsKprobe(args) if errors.Is(err, os.ErrNotExist) { - tp, err = tracefsKprobe(symbol, ret) + args.symbol = platformPrefix(symbol) + tp, err = tracefsKprobe(args) } if err != nil { return nil, fmt.Errorf("creating trace event '%s' in tracefs: %w", symbol, err) @@ -156,8 +221,8 @@ func kprobe(symbol string, prog *ebpf.Program, ret bool) (*perfEvent, error) { // pmuKprobe opens a perf event based on the kprobe PMU. // Returns os.ErrNotExist if the given symbol does not exist in the kernel. -func pmuKprobe(symbol string, ret bool) (*perfEvent, error) { - return pmuProbe(kprobeType, symbol, "", 0, ret) +func pmuKprobe(args probeArgs) (*perfEvent, error) { + return pmuProbe(kprobeType, args) } // pmuProbe opens a perf event based on a Performance Monitoring Unit. 
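The hunks above change the exported kprobe API of this vendored copy: Kprobe and Kretprobe now take a *KprobeOptions argument carrying Cookie and Offset. As an illustrative sketch only (not part of the patch; the traced symbol "vfs_write" and the no-op program body are placeholders), attaching through the new signature would look roughly like:

package main

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/asm"
	"github.com/cilium/ebpf/link"
)

func main() {
	// Minimal kprobe-type program that just returns 0; a real program
	// would inspect its context and do useful work.
	prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
		Type: ebpf.Kprobe,
		Instructions: asm.Instructions{
			asm.Mov.Imm(asm.R0, 0),
			asm.Return(),
		},
		License: "MIT",
	})
	if err != nil {
		log.Fatal(err)
	}
	defer prog.Close()

	// Offset and Cookie come from the KprobeOptions struct added above;
	// Cookie requires kernel 5.15+ and is readable via bpf_get_attach_cookie().
	kp, err := link.Kprobe("vfs_write", prog, &link.KprobeOptions{Offset: 0})
	if err != nil {
		log.Fatal(err)
	}
	defer kp.Close()
}

Callers that previously used the two-argument form can pass nil for the options to keep the old behaviour.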
@@ -167,7 +232,7 @@ func pmuKprobe(symbol string, ret bool) (*perfEvent, error) { // 33ea4b24277b "perf/core: Implement the 'perf_uprobe' PMU" // // Returns ErrNotSupported if the kernel doesn't support perf_[k,u]probe PMU -func pmuProbe(typ probeType, symbol, path string, offset uint64, ret bool) (*perfEvent, error) { +func pmuProbe(typ probeType, args probeArgs) (*perfEvent, error) { // Getting the PMU type will fail if the kernel doesn't support // the perf_[k,u]probe PMU. et, err := getPMUEventType(typ) @@ -176,7 +241,7 @@ func pmuProbe(typ probeType, symbol, path string, offset uint64, ret bool) (*per } var config uint64 - if ret { + if args.ret { bit, err := typ.RetprobeBit() if err != nil { return nil, err @@ -191,22 +256,30 @@ func pmuProbe(typ probeType, symbol, path string, offset uint64, ret bool) (*per switch typ { case kprobeType: // Create a pointer to a NUL-terminated string for the kernel. - sp, err := unsafeStringPtr(symbol) + sp, err = unsafeStringPtr(args.symbol) if err != nil { return nil, err } attr = unix.PerfEventAttr{ + // The minimum size required for PMU kprobes is PERF_ATTR_SIZE_VER1, + // since it added the config2 (Ext2) field. Use Ext2 as probe_offset. + Size: unix.PERF_ATTR_SIZE_VER1, Type: uint32(et), // PMU event type read from sysfs Ext1: uint64(uintptr(sp)), // Kernel symbol to trace + Ext2: args.offset, // Kernel symbol offset Config: config, // Retprobe flag } case uprobeType: - sp, err := unsafeStringPtr(path) + sp, err = unsafeStringPtr(args.path) if err != nil { return nil, err } + if args.refCtrOffset != 0 { + config |= args.refCtrOffset << uprobeRefCtrOffsetShift + } + attr = unix.PerfEventAttr{ // The minimum size required for PMU uprobes is PERF_ATTR_SIZE_VER1, // since it added the config2 (Ext2) field. The Size field controls the @@ -215,18 +288,34 @@ func pmuProbe(typ probeType, symbol, path string, offset uint64, ret bool) (*per Size: unix.PERF_ATTR_SIZE_VER1, Type: uint32(et), // PMU event type read from sysfs Ext1: uint64(uintptr(sp)), // Uprobe path - Ext2: offset, // Uprobe offset - Config: config, // Retprobe flag + Ext2: args.offset, // Uprobe offset + Config: config, // RefCtrOffset, Retprobe flag } } - fd, err := unix.PerfEventOpen(&attr, perfAllThreads, 0, -1, unix.PERF_FLAG_FD_CLOEXEC) + rawFd, err := unix.PerfEventOpen(&attr, args.pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC) + // On some old kernels, kprobe PMU doesn't allow `.` in symbol names and + // return -EINVAL. Return ErrNotSupported to allow falling back to tracefs. + // https://github.com/torvalds/linux/blob/94710cac0ef4/kernel/trace/trace_kprobe.c#L340-L343 + if errors.Is(err, unix.EINVAL) && strings.Contains(args.symbol, ".") { + return nil, fmt.Errorf("symbol '%s+%#x': older kernels don't accept dots: %w", args.symbol, args.offset, ErrNotSupported) + } // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL // when trying to create a kretprobe for a missing symbol. Make sure ENOENT // is returned to the caller. if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { - return nil, fmt.Errorf("symbol '%s' not found: %w", symbol, os.ErrNotExist) + return nil, fmt.Errorf("symbol '%s+%#x' not found: %w", args.symbol, args.offset, os.ErrNotExist) + } + // Since commit ab105a4fb894, -EILSEQ is returned when a kprobe sym+offset is resolved + // to an invalid insn boundary. 
+ if errors.Is(err, syscall.EILSEQ) { + return nil, fmt.Errorf("symbol '%s+%#x' not found (bad insn boundary): %w", args.symbol, args.offset, os.ErrNotExist) + } + // Since at least commit cb9a19fe4aa51, ENOTSUPP is returned + // when attempting to set a uprobe on a trap instruction. + if errors.Is(err, unix.ENOTSUPP) { + return nil, fmt.Errorf("failed setting uprobe on offset %#x (possible trap insn): %w", args.offset, err) } if err != nil { return nil, fmt.Errorf("opening perf event: %w", err) @@ -235,18 +324,24 @@ func pmuProbe(typ probeType, symbol, path string, offset uint64, ret bool) (*per // Ensure the string pointer is not collected before PerfEventOpen returns. runtime.KeepAlive(sp) + fd, err := sys.NewFD(rawFd) + if err != nil { + return nil, err + } + // Kernel has perf_[k,u]probe PMU available, initialize perf event. return &perfEvent{ - fd: internal.NewFD(uint32(fd)), - pmuID: et, - name: symbol, - typ: typ.PerfEventType(ret), + typ: typ.PerfEventType(args.ret), + name: args.symbol, + pmuID: et, + cookie: args.cookie, + fd: fd, }, nil } // tracefsKprobe creates a Kprobe tracefs entry. -func tracefsKprobe(symbol string, ret bool) (*perfEvent, error) { - return tracefsProbe(kprobeType, symbol, "", 0, ret) +func tracefsKprobe(args probeArgs) (*perfEvent, error) { + return tracefsProbe(kprobeType, args) } // tracefsProbe creates a trace event by writing an entry to /[k,u]probe_events. @@ -255,7 +350,7 @@ func tracefsKprobe(symbol string, ret bool) (*perfEvent, error) { // Path and offset are only set in the case of uprobe(s) and are used to set // the executable/library path on the filesystem and the offset where the probe is inserted. // A perf event is then opened on the newly-created trace event and returned to the caller. -func tracefsProbe(typ probeType, symbol, path string, offset uint64, ret bool) (*perfEvent, error) { +func tracefsProbe(typ probeType, args probeArgs) (_ *perfEvent, err error) { // Generate a random string for each trace event we attempt to create. // This value is used as the 'group' token in tracefs to allow creating // multiple kprobe trace events with the same name. @@ -263,42 +358,53 @@ func tracefsProbe(typ probeType, symbol, path string, offset uint64, ret bool) ( if err != nil { return nil, fmt.Errorf("randomizing group name: %w", err) } + args.group = group // Before attempting to create a trace event through tracefs, // check if an event with the same group and name already exists. // Kernels 4.x and earlier don't return os.ErrExist on writing a duplicate // entry, so we need to rely on reads for detecting uniqueness. - _, err = getTraceEventID(group, symbol) + _, err = getTraceEventID(group, args.symbol) if err == nil { - return nil, fmt.Errorf("trace event already exists: %s/%s", group, symbol) + return nil, fmt.Errorf("trace event already exists: %s/%s", group, args.symbol) } if err != nil && !errors.Is(err, os.ErrNotExist) { - return nil, fmt.Errorf("checking trace event %s/%s: %w", group, symbol, err) + return nil, fmt.Errorf("checking trace event %s/%s: %w", group, args.symbol, err) } // Create the [k,u]probe trace event using tracefs. - if err := createTraceFSProbeEvent(typ, group, symbol, path, offset, ret); err != nil { + if err := createTraceFSProbeEvent(typ, args); err != nil { return nil, fmt.Errorf("creating probe entry on tracefs: %w", err) } + defer func() { + if err != nil { + // Make sure we clean up the created tracefs event when we return error. 
+ // If a livepatch handler is already active on the symbol, the write to + // tracefs will succeed, a trace event will show up, but creating the + // perf event will fail with EBUSY. + _ = closeTraceFSProbeEvent(typ, args.group, args.symbol) + } + }() // Get the newly-created trace event's id. - tid, err := getTraceEventID(group, symbol) + tid, err := getTraceEventID(group, args.symbol) if err != nil { return nil, fmt.Errorf("getting trace event id: %w", err) } // Kprobes are ephemeral tracepoints and share the same perf event type. - fd, err := openTracepointPerfEvent(tid) + fd, err := openTracepointPerfEvent(tid, args.pid) if err != nil { return nil, err } return &perfEvent{ - fd: fd, + typ: typ.PerfEventType(args.ret), group: group, - name: symbol, + name: args.symbol, tracefsID: tid, - typ: typ.PerfEventType(ret), + cookie: args.cookie, + fd: fd, }, nil } @@ -306,7 +412,7 @@ func tracefsProbe(typ probeType, symbol, path string, offset uint64, ret bool) ( // /[k,u]probe_events. Returns os.ErrNotExist if symbol is not a valid // kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist // if a probe with the same group and symbol already exists. -func createTraceFSProbeEvent(typ probeType, group, symbol, path string, offset uint64, ret bool) error { +func createTraceFSProbeEvent(typ probeType, args probeArgs) error { // Open the kprobe_events file in tracefs. f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666) if err != nil { @@ -314,7 +420,7 @@ func createTraceFSProbeEvent(typ probeType, group, symbol, path string, offset u } defer f.Close() - var pe string + var pe, token string switch typ { case kprobeType: // The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt): @@ -331,7 +437,8 @@ func createTraceFSProbeEvent(typ probeType, group, symbol, path string, offset u // subsampling or rate limiting logic can be more accurately implemented in // the eBPF program itself. // See Documentation/kprobes.txt for more details. - pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(ret), group, symbol, symbol) + token = kprobeToken(args) + pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret), args.group, sanitizeSymbol(args.symbol), token) case uprobeType: // The uprobe_events syntax is as follows: // p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe @@ -340,18 +447,30 @@ func createTraceFSProbeEvent(typ probeType, group, symbol, path string, offset u // // Some examples: // r:ebpf_1234/readline /bin/bash:0x12345 - // p:ebpf_5678/main_mySymbol /bin/mybin:0x12345 + // p:ebpf_5678/main_mySymbol /bin/mybin:0x12345(0x123) // // See Documentation/trace/uprobetracer.txt for more details. - pathOffset := uprobePathOffset(path, offset) - pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(ret), group, symbol, pathOffset) + token = uprobeToken(args) + pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret), args.group, args.symbol, token) } _, err = f.WriteString(pe) // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL // when trying to create a kretprobe for a missing symbol. Make sure ENOENT // is returned to the caller. + // EINVAL is also returned on pre-5.2 kernels when the `SYM[+offs]` token + // is resolved to an invalid insn boundary. 
if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { - return fmt.Errorf("symbol %s not found: %w", symbol, os.ErrNotExist) + return fmt.Errorf("token %s: %w", token, os.ErrNotExist) + } + // Since commit ab105a4fb894, -EILSEQ is returned when a kprobe sym+offset is resolved + // to an invalid insn boundary. + if errors.Is(err, syscall.EILSEQ) { + return fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist) + } + // ERANGE is returned when the `SYM[+offs]` token is too big and cannot + // be resolved. + if errors.Is(err, syscall.ERANGE) { + return fmt.Errorf("token %s: offset too big: %w", token, os.ErrNotExist) } if err != nil { return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err) @@ -371,7 +490,7 @@ func closeTraceFSProbeEvent(typ probeType, group, symbol string) error { // See [k,u]probe_events syntax above. The probe type does not need to be specified // for removals. - pe := fmt.Sprintf("-:%s/%s", group, symbol) + pe := fmt.Sprintf("-:%s/%s", group, sanitizeSymbol(symbol)) if _, err = f.WriteString(pe); err != nil { return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err) } @@ -382,9 +501,9 @@ func closeTraceFSProbeEvent(typ probeType, group, symbol string) error { // randomGroup generates a pseudorandom string for use as a tracefs group name. // Returns an error when the output string would exceed 63 characters (kernel // limitation), when rand.Read() fails or when prefix contains characters not -// allowed by rgxTraceEvent. +// allowed by isValidTraceID. func randomGroup(prefix string) (string, error) { - if !rgxTraceEvent.MatchString(prefix) { + if !isValidTraceID(prefix) { return "", fmt.Errorf("prefix '%s' must be alphanumeric or underscore: %w", prefix, errInvalidInput) } @@ -413,7 +532,7 @@ func probePrefix(ret bool) string { func determineRetprobeBit(typ probeType) (uint64, error) { p := filepath.Join("/sys/bus/event_source/devices/", typ.String(), "/format/retprobe") - data, err := ioutil.ReadFile(p) + data, err := os.ReadFile(p) if err != nil { return 0, err } @@ -436,3 +555,14 @@ func kretprobeBit() (uint64, error) { }) return kprobeRetprobeBit.value, kprobeRetprobeBit.err } + +// kprobeToken creates the SYM[+offs] token for the tracefs api. +func kprobeToken(args probeArgs) string { + po := args.symbol + + if args.offset != 0 { + po += fmt.Sprintf("+%#x", args.offset) + } + + return po +} diff --git a/agent/vendor/github.com/cilium/ebpf/link/link.go b/agent/vendor/github.com/cilium/ebpf/link/link.go index 16cfff415dd..067d0101aa9 100644 --- a/agent/vendor/github.com/cilium/ebpf/link/link.go +++ b/agent/vendor/github.com/cilium/ebpf/link/link.go @@ -1,11 +1,14 @@ package link import ( + "bytes" + "encoding/binary" "fmt" - "unsafe" "github.com/cilium/ebpf" + "github.com/cilium/ebpf/btf" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" ) var ErrNotSupported = internal.ErrNotSupported @@ -29,17 +32,58 @@ type Link interface { // Close frees resources. // - // The link will be broken unless it has been pinned. A link - // may continue past the lifetime of the process if Close is + // The link will be broken unless it has been successfully pinned. + // A link may continue past the lifetime of the process if Close is // not called. Close() error + // Info returns metadata on a link. + // + // May return an error wrapping ErrNotSupported. + Info() (*Info, error) + // Prevent external users from implementing this interface. 
isLink() } +// LoadPinnedLink loads a link that was persisted into a bpffs. +func LoadPinnedLink(fileName string, opts *ebpf.LoadPinOptions) (Link, error) { + raw, err := loadPinnedRawLink(fileName, opts) + if err != nil { + return nil, err + } + + return wrapRawLink(raw) +} + +// wrap a RawLink in a more specific type if possible. +// +// The function takes ownership of raw and closes it on error. +func wrapRawLink(raw *RawLink) (Link, error) { + info, err := raw.Info() + if err != nil { + raw.Close() + return nil, err + } + + switch info.Type { + case RawTracepointType: + return &rawTracepoint{*raw}, nil + case TracingType: + return &tracing{*raw}, nil + case CgroupType: + return &linkCgroup{*raw}, nil + case IterType: + return &Iter{*raw}, nil + case NetNsType: + return &NetNsLink{*raw}, nil + default: + return raw, nil + } +} + // ID uniquely identifies a BPF link. -type ID uint32 +type ID = sys.LinkID // RawLinkOptions control the creation of a raw link. type RawLinkOptions struct { @@ -49,13 +93,55 @@ type RawLinkOptions struct { Program *ebpf.Program // Attach must match the attach type of Program. Attach ebpf.AttachType + // BTF is the BTF of the attachment target. + BTF btf.TypeID + // Flags control the attach behaviour. + Flags uint32 } -// RawLinkInfo contains metadata on a link. -type RawLinkInfo struct { +// Info contains metadata on a link. +type Info struct { Type Type ID ID Program ebpf.ProgramID + extra interface{} +} + +type TracingInfo sys.TracingLinkInfo +type CgroupInfo sys.CgroupLinkInfo +type NetNsInfo sys.NetNsLinkInfo +type XDPInfo sys.XDPLinkInfo + +// Tracing returns tracing type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Tracing() *TracingInfo { + e, _ := r.extra.(*TracingInfo) + return e +} + +// Cgroup returns cgroup type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Cgroup() *CgroupInfo { + e, _ := r.extra.(*CgroupInfo) + return e +} + +// NetNs returns netns type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) NetNs() *NetNsInfo { + e, _ := r.extra.(*NetNsInfo) + return e +} + +// ExtraNetNs returns XDP type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) XDP() *XDPInfo { + e, _ := r.extra.(*XDPInfo) + return e } // RawLink is the low-level API to bpf_link. @@ -63,7 +149,7 @@ type RawLinkInfo struct { // You should consider using the higher level interfaces in this // package instead. 
type RawLink struct { - fd *internal.FD + fd *sys.FD pinnedPath string } @@ -74,20 +160,22 @@ func AttachRawLink(opts RawLinkOptions) (*RawLink, error) { } if opts.Target < 0 { - return nil, fmt.Errorf("invalid target: %s", internal.ErrClosedFd) + return nil, fmt.Errorf("invalid target: %s", sys.ErrClosedFd) } progFd := opts.Program.FD() if progFd < 0 { - return nil, fmt.Errorf("invalid program: %s", internal.ErrClosedFd) + return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd) } - attr := bpfLinkCreateAttr{ - targetFd: uint32(opts.Target), - progFd: uint32(progFd), - attachType: opts.Attach, + attr := sys.LinkCreateAttr{ + TargetFd: uint32(opts.Target), + ProgFd: uint32(progFd), + AttachType: sys.AttachType(opts.Attach), + TargetBtfId: uint32(opts.BTF), + Flags: opts.Flags, } - fd, err := bpfLinkCreate(&attr) + fd, err := sys.LinkCreate(&attr) if err != nil { return nil, fmt.Errorf("can't create link: %s", err) } @@ -95,44 +183,23 @@ func AttachRawLink(opts RawLinkOptions) (*RawLink, error) { return &RawLink{fd, ""}, nil } -// LoadPinnedRawLink loads a persisted link from a bpffs. -// -// Returns an error if the pinned link type doesn't match linkType. Pass -// UnspecifiedType to disable this behaviour. -func LoadPinnedRawLink(fileName string, linkType Type, opts *ebpf.LoadPinOptions) (*RawLink, error) { - fd, err := internal.BPFObjGet(fileName, opts.Marshal()) +func loadPinnedRawLink(fileName string, opts *ebpf.LoadPinOptions) (*RawLink, error) { + fd, err := sys.ObjGet(&sys.ObjGetAttr{ + Pathname: sys.NewStringPointer(fileName), + FileFlags: opts.Marshal(), + }) if err != nil { return nil, fmt.Errorf("load pinned link: %w", err) } - link := &RawLink{fd, fileName} - if linkType == UnspecifiedType { - return link, nil - } - - info, err := link.Info() - if err != nil { - link.Close() - return nil, fmt.Errorf("get pinned link info: %s", err) - } - - if info.Type != linkType { - link.Close() - return nil, fmt.Errorf("link type %v doesn't match %v", info.Type, linkType) - } - - return link, nil + return &RawLink{fd, fileName}, nil } func (l *RawLink) isLink() {} // FD returns the raw file descriptor. func (l *RawLink) FD() int { - fd, err := l.fd.Value() - if err != nil { - return -1 - } - return int(fd) + return l.fd.Int() } // Close breaks the link. @@ -181,49 +248,66 @@ type RawLinkUpdateOptions struct { func (l *RawLink) UpdateArgs(opts RawLinkUpdateOptions) error { newFd := opts.New.FD() if newFd < 0 { - return fmt.Errorf("invalid program: %s", internal.ErrClosedFd) + return fmt.Errorf("invalid program: %s", sys.ErrClosedFd) } var oldFd int if opts.Old != nil { oldFd = opts.Old.FD() if oldFd < 0 { - return fmt.Errorf("invalid replacement program: %s", internal.ErrClosedFd) + return fmt.Errorf("invalid replacement program: %s", sys.ErrClosedFd) } } - linkFd, err := l.fd.Value() - if err != nil { - return fmt.Errorf("can't update link: %s", err) - } - - attr := bpfLinkUpdateAttr{ - linkFd: linkFd, - newProgFd: uint32(newFd), - oldProgFd: uint32(oldFd), - flags: opts.Flags, + attr := sys.LinkUpdateAttr{ + LinkFd: l.fd.Uint(), + NewProgFd: uint32(newFd), + OldProgFd: uint32(oldFd), + Flags: opts.Flags, } - return bpfLinkUpdate(&attr) -} - -// struct bpf_link_info -type bpfLinkInfo struct { - typ uint32 - id uint32 - prog_id uint32 + return sys.LinkUpdate(&attr) } // Info returns metadata about the link. 
-func (l *RawLink) Info() (*RawLinkInfo, error) { - var info bpfLinkInfo - err := internal.BPFObjGetInfoByFD(l.fd, unsafe.Pointer(&info), unsafe.Sizeof(info)) - if err != nil { +func (l *RawLink) Info() (*Info, error) { + var info sys.LinkInfo + + if err := sys.ObjInfo(l.fd, &info); err != nil { return nil, fmt.Errorf("link info: %s", err) } - return &RawLinkInfo{ - Type(info.typ), - ID(info.id), - ebpf.ProgramID(info.prog_id), + var extra interface{} + switch info.Type { + case CgroupType: + extra = &CgroupInfo{} + case IterType: + // not supported + case NetNsType: + extra = &NetNsInfo{} + case RawTracepointType: + // not supported + case TracingType: + extra = &TracingInfo{} + case XDPType: + extra = &XDPInfo{} + case PerfEventType: + // no extra + default: + return nil, fmt.Errorf("unknown link info type: %d", info.Type) + } + + if info.Type != RawTracepointType && info.Type != IterType && info.Type != PerfEventType { + buf := bytes.NewReader(info.Extra[:]) + err := binary.Read(buf, internal.NativeEndian, extra) + if err != nil { + return nil, fmt.Errorf("can not read extra link info: %w", err) + } + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, }, nil } diff --git a/agent/vendor/github.com/cilium/ebpf/link/netns.go b/agent/vendor/github.com/cilium/ebpf/link/netns.go index 37e5b84c4dd..344ecced6be 100644 --- a/agent/vendor/github.com/cilium/ebpf/link/netns.go +++ b/agent/vendor/github.com/cilium/ebpf/link/netns.go @@ -6,14 +6,9 @@ import ( "github.com/cilium/ebpf" ) -// NetNsInfo contains metadata about a network namespace link. -type NetNsInfo struct { - RawLinkInfo -} - // NetNsLink is a program attached to a network namespace. type NetNsLink struct { - *RawLink + RawLink } // AttachNetNs attaches a program to a network namespace. @@ -37,24 +32,5 @@ func AttachNetNs(ns int, prog *ebpf.Program) (*NetNsLink, error) { return nil, err } - return &NetNsLink{link}, nil -} - -// LoadPinnedNetNs loads a network namespace link from bpffs. -func LoadPinnedNetNs(fileName string, opts *ebpf.LoadPinOptions) (*NetNsLink, error) { - link, err := LoadPinnedRawLink(fileName, NetNsType, opts) - if err != nil { - return nil, err - } - - return &NetNsLink{link}, nil -} - -// Info returns information about the link. -func (nns *NetNsLink) Info() (*NetNsInfo, error) { - info, err := nns.RawLink.Info() - if err != nil { - return nil, err - } - return &NetNsInfo{*info}, nil + return &NetNsLink{*link}, nil } diff --git a/agent/vendor/github.com/cilium/ebpf/link/perf_event.go b/agent/vendor/github.com/cilium/ebpf/link/perf_event.go index 5267a47ec9b..0e5bd47911b 100644 --- a/agent/vendor/github.com/cilium/ebpf/link/perf_event.go +++ b/agent/vendor/github.com/cilium/ebpf/link/perf_event.go @@ -4,17 +4,17 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" "os" "path/filepath" - "regexp" "runtime" "strconv" "strings" "unsafe" "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) @@ -44,11 +44,6 @@ import ( var ( tracefsPath = "/sys/kernel/debug/tracing" - // Trace event groups, names and kernel symbols must adhere to this set - // of characters. Non-empty, first character must not be a number, all - // characters must be alphanumeric or underscore. - rgxTraceEvent = regexp.MustCompile("^[a-zA-Z_][0-9a-zA-Z_]*$") - errInvalidInput = errors.New("invalid input") ) @@ -70,6 +65,8 @@ const ( // can be attached to it. 
It is created based on a tracefs trace event or a // Performance Monitoring Unit (PMU). type perfEvent struct { + // The event type determines the types of programs that can be attached. + typ perfEventType // Group and name of the tracepoint/kprobe/uprobe. group string @@ -80,53 +77,15 @@ type perfEvent struct { // ID of the trace event read from tracefs. Valid IDs are non-zero. tracefsID uint64 - // The event type determines the types of programs that can be attached. - typ perfEventType + // User provided arbitrary value. + cookie uint64 - fd *internal.FD -} - -func (pe *perfEvent) isLink() {} - -func (pe *perfEvent) Pin(string) error { - return fmt.Errorf("pin perf event: %w", ErrNotSupported) -} - -func (pe *perfEvent) Unpin() error { - return fmt.Errorf("unpin perf event: %w", ErrNotSupported) -} - -// Since 4.15 (e87c6bc3852b "bpf: permit multiple bpf attachments for a single perf event"), -// calling PERF_EVENT_IOC_SET_BPF appends the given program to a prog_array -// owned by the perf event, which means multiple programs can be attached -// simultaneously. -// -// Before 4.15, calling PERF_EVENT_IOC_SET_BPF more than once on a perf event -// returns EEXIST. -// -// Detaching a program from a perf event is currently not possible, so a -// program replacement mechanism cannot be implemented for perf events. -func (pe *perfEvent) Update(prog *ebpf.Program) error { - return fmt.Errorf("can't replace eBPF program in perf event: %w", ErrNotSupported) + // This is the perf event FD. + fd *sys.FD } func (pe *perfEvent) Close() error { - if pe.fd == nil { - return nil - } - - pfd, err := pe.fd.Value() - if err != nil { - return fmt.Errorf("getting perf event fd: %w", err) - } - - err = unix.IoctlSetInt(int(pfd), unix.PERF_EVENT_IOC_DISABLE, 0) - if err != nil { - return fmt.Errorf("disabling perf event: %w", err) - } - - err = pe.fd.Close() - if err != nil { + if err := pe.fd.Close(); err != nil { return fmt.Errorf("closing perf event fd: %w", err) } @@ -149,49 +108,150 @@ func (pe *perfEvent) Close() error { return nil } +// perfEventLink represents a bpf perf link. +type perfEventLink struct { + RawLink + pe *perfEvent +} + +func (pl *perfEventLink) isLink() {} + +// Pinning requires the underlying perf event FD to stay open. +// +// | PerfEvent FD | BpfLink FD | Works | +// |--------------|------------|-------| +// | Open | Open | Yes | +// | Closed | Open | No | +// | Open | Closed | No (Pin() -> EINVAL) | +// | Closed | Closed | No (Pin() -> EINVAL) | +// +// There is currently no pretty way to recover the perf event FD +// when loading a pinned link, so leave as not supported for now. +func (pl *perfEventLink) Pin(string) error { + return fmt.Errorf("perf event link pin: %w", ErrNotSupported) +} + +func (pl *perfEventLink) Unpin() error { + return fmt.Errorf("perf event link unpin: %w", ErrNotSupported) +} + +func (pl *perfEventLink) Close() error { + if err := pl.pe.Close(); err != nil { + return fmt.Errorf("perf event link close: %w", err) + } + return pl.fd.Close() +} + +func (pl *perfEventLink) Update(prog *ebpf.Program) error { + return fmt.Errorf("perf event link update: %w", ErrNotSupported) +} + +// perfEventIoctl implements Link and handles the perf event lifecycle +// via ioctl(). 
+type perfEventIoctl struct { + *perfEvent +} + +func (pi *perfEventIoctl) isLink() {} + +// Since 4.15 (e87c6bc3852b "bpf: permit multiple bpf attachments for a single perf event"), +// calling PERF_EVENT_IOC_SET_BPF appends the given program to a prog_array +// owned by the perf event, which means multiple programs can be attached +// simultaneously. +// +// Before 4.15, calling PERF_EVENT_IOC_SET_BPF more than once on a perf event +// returns EEXIST. +// +// Detaching a program from a perf event is currently not possible, so a +// program replacement mechanism cannot be implemented for perf events. +func (pi *perfEventIoctl) Update(prog *ebpf.Program) error { + return fmt.Errorf("perf event ioctl update: %w", ErrNotSupported) +} + +func (pi *perfEventIoctl) Pin(string) error { + return fmt.Errorf("perf event ioctl pin: %w", ErrNotSupported) +} + +func (pi *perfEventIoctl) Unpin() error { + return fmt.Errorf("perf event ioctl unpin: %w", ErrNotSupported) +} + +func (pi *perfEventIoctl) Info() (*Info, error) { + return nil, fmt.Errorf("perf event ioctl info: %w", ErrNotSupported) +} + // attach the given eBPF prog to the perf event stored in pe. // pe must contain a valid perf event fd. // prog's type must match the program type stored in pe. -func (pe *perfEvent) attach(prog *ebpf.Program) error { +func attachPerfEvent(pe *perfEvent, prog *ebpf.Program) (Link, error) { if prog == nil { - return errors.New("cannot attach a nil program") - } - if pe.fd == nil { - return errors.New("cannot attach to nil perf event") + return nil, errors.New("cannot attach a nil program") } if prog.FD() < 0 { - return fmt.Errorf("invalid program: %w", internal.ErrClosedFd) + return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd) } + switch pe.typ { case kprobeEvent, kretprobeEvent, uprobeEvent, uretprobeEvent: if t := prog.Type(); t != ebpf.Kprobe { - return fmt.Errorf("invalid program type (expected %s): %s", ebpf.Kprobe, t) + return nil, fmt.Errorf("invalid program type (expected %s): %s", ebpf.Kprobe, t) } case tracepointEvent: if t := prog.Type(); t != ebpf.TracePoint { - return fmt.Errorf("invalid program type (expected %s): %s", ebpf.TracePoint, t) + return nil, fmt.Errorf("invalid program type (expected %s): %s", ebpf.TracePoint, t) } default: - return fmt.Errorf("unknown perf event type: %d", pe.typ) + return nil, fmt.Errorf("unknown perf event type: %d", pe.typ) + } + + if err := haveBPFLinkPerfEvent(); err == nil { + return attachPerfEventLink(pe, prog) } + return attachPerfEventIoctl(pe, prog) +} - // The ioctl below will fail when the fd is invalid. - kfd, _ := pe.fd.Value() +func attachPerfEventIoctl(pe *perfEvent, prog *ebpf.Program) (*perfEventIoctl, error) { + if pe.cookie != 0 { + return nil, fmt.Errorf("cookies are not supported: %w", ErrNotSupported) + } // Assign the eBPF program to the perf event. - err := unix.IoctlSetInt(int(kfd), unix.PERF_EVENT_IOC_SET_BPF, prog.FD()) + err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_SET_BPF, prog.FD()) if err != nil { - return fmt.Errorf("setting perf event bpf program: %w", err) + return nil, fmt.Errorf("setting perf event bpf program: %w", err) } // PERF_EVENT_IOC_ENABLE and _DISABLE ignore their given values. 
- if err := unix.IoctlSetInt(int(kfd), unix.PERF_EVENT_IOC_ENABLE, 0); err != nil { - return fmt.Errorf("enable perf event: %s", err) + if err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_ENABLE, 0); err != nil { + return nil, fmt.Errorf("enable perf event: %s", err) } + pi := &perfEventIoctl{pe} + // Close the perf event when its reference is lost to avoid leaking system resources. - runtime.SetFinalizer(pe, (*perfEvent).Close) - return nil + runtime.SetFinalizer(pi, (*perfEventIoctl).Close) + return pi, nil +} + +// Use the bpf api to attach the perf event (BPF_LINK_TYPE_PERF_EVENT, 5.15+). +// +// https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e +func attachPerfEventLink(pe *perfEvent, prog *ebpf.Program) (*perfEventLink, error) { + fd, err := sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{ + ProgFd: uint32(prog.FD()), + TargetFd: pe.fd.Uint(), + AttachType: sys.BPF_PERF_EVENT, + BpfCookie: pe.cookie, + }) + if err != nil { + return nil, fmt.Errorf("cannot create bpf perf link: %v", err) + } + + pl := &perfEventLink{RawLink{fd: fd}, pe} + + // Close the perf event when its reference is lost to avoid leaking system resources. + runtime.SetFinalizer(pl, (*perfEventLink).Close) + return pl, nil } // unsafeStringPtr returns an unsafe.Pointer to a NUL-terminated copy of str. @@ -204,8 +264,12 @@ func unsafeStringPtr(str string) (unsafe.Pointer, error) { } // getTraceEventID reads a trace event's ID from tracefs given its group and name. -// group and name must be alphanumeric or underscore, as required by the kernel. +// The kernel requires group and name to be alphanumeric or underscore. +// +// name automatically has its invalid symbols converted to underscores so the caller +// can pass a raw symbol name, e.g. a kernel symbol containing dots. func getTraceEventID(group, name string) (uint64, error) { + name = sanitizeSymbol(name) tid, err := uint64FromFile(tracefsPath, "events", group, name, "id") if errors.Is(err, os.ErrNotExist) { return 0, fmt.Errorf("trace event %s/%s: %w", group, name, os.ErrNotExist) @@ -236,7 +300,7 @@ func getPMUEventType(typ probeType) (uint64, error) { // openTracepointPerfEvent opens a tracepoint-type perf event. System-wide // [k,u]probes created by writing to /[k,u]probe_events are tracepoints // behind the scenes, and can be attached to using these perf events. -func openTracepointPerfEvent(tid uint64) (*internal.FD, error) { +func openTracepointPerfEvent(tid uint64, pid int) (*sys.FD, error) { attr := unix.PerfEventAttr{ Type: unix.PERF_TYPE_TRACEPOINT, Config: tid, @@ -245,12 +309,12 @@ func openTracepointPerfEvent(tid uint64) (*internal.FD, error) { Wakeup: 1, } - fd, err := unix.PerfEventOpen(&attr, perfAllThreads, 0, -1, unix.PERF_FLAG_FD_CLOEXEC) + fd, err := unix.PerfEventOpen(&attr, pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC) if err != nil { return nil, fmt.Errorf("opening tracepoint perf event: %w", err) } - return internal.NewFD(uint32(fd)), nil + return sys.NewFD(fd) } // uint64FromFile reads a uint64 from a file. 
All elements of path are sanitized @@ -263,7 +327,7 @@ func uint64FromFile(base string, path ...string) (uint64, error) { return 0, fmt.Errorf("path '%s' attempts to escape base path '%s': %w", l, base, errInvalidInput) } - data, err := ioutil.ReadFile(p) + data, err := os.ReadFile(p) if err != nil { return 0, fmt.Errorf("reading file %s: %w", p, err) } @@ -271,3 +335,60 @@ func uint64FromFile(base string, path ...string) (uint64, error) { et := bytes.TrimSpace(data) return strconv.ParseUint(string(et), 10, 64) } + +// Probe BPF perf link. +// +// https://elixir.bootlin.com/linux/v5.16.8/source/kernel/bpf/syscall.c#L4307 +// https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e +var haveBPFLinkPerfEvent = internal.FeatureTest("bpf_link_perf_event", "5.15", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Name: "probe_bpf_perf_link", + Type: ebpf.Kprobe, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + License: "MIT", + }) + if err != nil { + return err + } + defer prog.Close() + + _, err = sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{ + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_PERF_EVENT, + }) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + if errors.Is(err, unix.EBADF) { + return nil + } + return err +}) + +// isValidTraceID implements the equivalent of a regex match +// against "^[a-zA-Z_][0-9a-zA-Z_]*$". +// +// Trace event groups, names and kernel symbols must adhere to this set +// of characters. Non-empty, first character must not be a number, all +// characters must be alphanumeric or underscore. +func isValidTraceID(s string) bool { + if len(s) < 1 { + return false + } + for i, c := range []byte(s) { + switch { + case c >= 'a' && c <= 'z': + case c >= 'A' && c <= 'Z': + case c == '_': + case i > 0 && c >= '0' && c <= '9': + + default: + return false + } + } + + return true +} diff --git a/agent/vendor/github.com/cilium/ebpf/link/program.go b/agent/vendor/github.com/cilium/ebpf/link/program.go index b90c4574676..ea31817377f 100644 --- a/agent/vendor/github.com/cilium/ebpf/link/program.go +++ b/agent/vendor/github.com/cilium/ebpf/link/program.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/cilium/ebpf" - "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" ) type RawAttachProgramOptions struct { @@ -34,7 +34,7 @@ func RawAttachProgram(opts RawAttachProgramOptions) error { replaceFd = uint32(opts.Replace.FD()) } - attr := internal.BPFProgAttachAttr{ + attr := sys.ProgAttachAttr{ TargetFd: uint32(opts.Target), AttachBpfFd: uint32(opts.Program.FD()), ReplaceBpfFd: replaceFd, @@ -42,7 +42,7 @@ func RawAttachProgram(opts RawAttachProgramOptions) error { AttachFlags: uint32(opts.Flags), } - if err := internal.BPFProgAttach(&attr); err != nil { + if err := sys.ProgAttach(&attr); err != nil { return fmt.Errorf("can't attach program: %w", err) } return nil @@ -63,12 +63,12 @@ func RawDetachProgram(opts RawDetachProgramOptions) error { return err } - attr := internal.BPFProgDetachAttr{ + attr := sys.ProgDetachAttr{ TargetFd: uint32(opts.Target), AttachBpfFd: uint32(opts.Program.FD()), AttachType: uint32(opts.Attach), } - if err := internal.BPFProgDetach(&attr); err != nil { + if err := sys.ProgDetach(&attr); err != nil { return fmt.Errorf("can't detach program: %w", err) } diff --git a/agent/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go b/agent/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go index f4beb1e0786..925e621cbbc 100644 --- 
a/agent/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go +++ b/agent/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go @@ -1,10 +1,11 @@ package link import ( + "errors" "fmt" "github.com/cilium/ebpf" - "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" ) type RawTracepointOptions struct { @@ -22,40 +23,65 @@ func AttachRawTracepoint(opts RawTracepointOptions) (Link, error) { return nil, fmt.Errorf("invalid program type %s, expected RawTracepoint(Writable)", t) } if opts.Program.FD() < 0 { - return nil, fmt.Errorf("invalid program: %w", internal.ErrClosedFd) + return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd) } - fd, err := bpfRawTracepointOpen(&bpfRawTracepointOpenAttr{ - name: internal.NewStringPointer(opts.Name), - fd: uint32(opts.Program.FD()), + fd, err := sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{ + Name: sys.NewStringPointer(opts.Name), + ProgFd: uint32(opts.Program.FD()), }) if err != nil { return nil, err } - return &progAttachRawTracepoint{fd: fd}, nil + err = haveBPFLink() + if errors.Is(err, ErrNotSupported) { + // Prior to commit 70ed506c3bbc ("bpf: Introduce pinnable bpf_link abstraction") + // raw_tracepoints are just a plain fd. + return &simpleRawTracepoint{fd}, nil + } + + if err != nil { + return nil, err + } + + return &rawTracepoint{RawLink{fd: fd}}, nil } -type progAttachRawTracepoint struct { - fd *internal.FD +type simpleRawTracepoint struct { + fd *sys.FD } -var _ Link = (*progAttachRawTracepoint)(nil) +var _ Link = (*simpleRawTracepoint)(nil) -func (rt *progAttachRawTracepoint) isLink() {} +func (frt *simpleRawTracepoint) isLink() {} -func (rt *progAttachRawTracepoint) Close() error { - return rt.fd.Close() +func (frt *simpleRawTracepoint) Close() error { + return frt.fd.Close() } -func (rt *progAttachRawTracepoint) Update(_ *ebpf.Program) error { - return fmt.Errorf("can't update raw_tracepoint: %w", ErrNotSupported) +func (frt *simpleRawTracepoint) Update(_ *ebpf.Program) error { + return fmt.Errorf("update raw_tracepoint: %w", ErrNotSupported) } -func (rt *progAttachRawTracepoint) Pin(_ string) error { - return fmt.Errorf("can't pin raw_tracepoint: %w", ErrNotSupported) +func (frt *simpleRawTracepoint) Pin(string) error { + return fmt.Errorf("pin raw_tracepoint: %w", ErrNotSupported) } -func (rt *progAttachRawTracepoint) Unpin() error { +func (frt *simpleRawTracepoint) Unpin() error { return fmt.Errorf("unpin raw_tracepoint: %w", ErrNotSupported) } + +func (frt *simpleRawTracepoint) Info() (*Info, error) { + return nil, fmt.Errorf("can't get raw_tracepoint info: %w", ErrNotSupported) +} + +type rawTracepoint struct { + RawLink +} + +var _ Link = (*rawTracepoint)(nil) + +func (rt *rawTracepoint) Update(_ *ebpf.Program) error { + return fmt.Errorf("update raw_tracepoint: %w", ErrNotSupported) +} diff --git a/agent/vendor/github.com/cilium/ebpf/link/socket_filter.go b/agent/vendor/github.com/cilium/ebpf/link/socket_filter.go new file mode 100644 index 00000000000..94f3958cc4d --- /dev/null +++ b/agent/vendor/github.com/cilium/ebpf/link/socket_filter.go @@ -0,0 +1,40 @@ +package link + +import ( + "syscall" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/unix" +) + +// AttachSocketFilter attaches a SocketFilter BPF program to a socket. 
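For orientation, a hedged usage sketch of the attach/detach helpers this new file provides (their signatures follow directly below); prog is assumed to be an already loaded SocketFilter program, and the loopback UDP listener is purely illustrative:

// Requires "net", "github.com/cilium/ebpf" and "github.com/cilium/ebpf/link".
func attachUDPFilter(prog *ebpf.Program) error {
	// Any type implementing syscall.Conn works; *net.UDPConn does.
	pc, err := net.ListenPacket("udp", "127.0.0.1:0")
	if err != nil {
		return err
	}
	defer pc.Close()
	udp := pc.(*net.UDPConn)

	// Attach the filter; the program now decides which packets reach the socket.
	if err := link.AttachSocketFilter(udp, prog); err != nil {
		return err
	}
	// Detach again when done.
	return link.DetachSocketFilter(udp)
}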
+func AttachSocketFilter(conn syscall.Conn, program *ebpf.Program) error { + rawConn, err := conn.SyscallConn() + if err != nil { + return err + } + var ssoErr error + err = rawConn.Control(func(fd uintptr) { + ssoErr = syscall.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_ATTACH_BPF, program.FD()) + }) + if ssoErr != nil { + return ssoErr + } + return err +} + +// DetachSocketFilter detaches a SocketFilter BPF program from a socket. +func DetachSocketFilter(conn syscall.Conn) error { + rawConn, err := conn.SyscallConn() + if err != nil { + return err + } + var ssoErr error + err = rawConn.Control(func(fd uintptr) { + ssoErr = syscall.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_DETACH_BPF, 0) + }) + if ssoErr != nil { + return ssoErr + } + return err +} diff --git a/agent/vendor/github.com/cilium/ebpf/link/syscalls.go b/agent/vendor/github.com/cilium/ebpf/link/syscalls.go index 30e8a880507..a661395b360 100644 --- a/agent/vendor/github.com/cilium/ebpf/link/syscalls.go +++ b/agent/vendor/github.com/cilium/ebpf/link/syscalls.go @@ -2,35 +2,33 @@ package link import ( "errors" - "unsafe" "github.com/cilium/ebpf" "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) // Type is the kind of link. -type Type uint32 +type Type = sys.LinkType // Valid link types. -// -// Equivalent to enum bpf_link_type. const ( - UnspecifiedType Type = iota - RawTracepointType - TracingType - CgroupType - IterType - NetNsType - XDPType + UnspecifiedType = sys.BPF_LINK_TYPE_UNSPEC + RawTracepointType = sys.BPF_LINK_TYPE_RAW_TRACEPOINT + TracingType = sys.BPF_LINK_TYPE_TRACING + CgroupType = sys.BPF_LINK_TYPE_CGROUP + IterType = sys.BPF_LINK_TYPE_ITER + NetNsType = sys.BPF_LINK_TYPE_NETNS + XDPType = sys.BPF_LINK_TYPE_XDP + PerfEventType = sys.BPF_LINK_TYPE_PERF_EVENT ) var haveProgAttach = internal.FeatureTest("BPF_PROG_ATTACH", "4.10", func() error { prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ - Type: ebpf.CGroupSKB, - AttachType: ebpf.AttachCGroupInetIngress, - License: "MIT", + Type: ebpf.CGroupSKB, + License: "MIT", Instructions: asm.Instructions{ asm.Mov.Imm(asm.R0, 0), asm.Return(), @@ -69,7 +67,7 @@ var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replace // We know that we have BPF_PROG_ATTACH since we can load CGroupSKB programs. // If passing BPF_F_REPLACE gives us EINVAL we know that the feature isn't // present. - attr := internal.BPFProgAttachAttr{ + attr := sys.ProgAttachAttr{ // We rely on this being checked after attachFlags. 
TargetFd: ^uint32(0), AttachBpfFd: uint32(prog.FD()), @@ -77,7 +75,7 @@ var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replace AttachFlags: uint32(flagReplace), } - err = internal.BPFProgAttach(&attr) + err = sys.ProgAttach(&attr) if errors.Is(err, unix.EINVAL) { return internal.ErrNotSupported } @@ -87,72 +85,14 @@ var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replace return err }) -type bpfLinkCreateAttr struct { - progFd uint32 - targetFd uint32 - attachType ebpf.AttachType - flags uint32 -} - -func bpfLinkCreate(attr *bpfLinkCreateAttr) (*internal.FD, error) { - ptr, err := internal.BPF(internal.BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err != nil { - return nil, err - } - return internal.NewFD(uint32(ptr)), nil -} - -type bpfLinkCreateIterAttr struct { - prog_fd uint32 - target_fd uint32 - attach_type ebpf.AttachType - flags uint32 - iter_info internal.Pointer - iter_info_len uint32 -} - -func bpfLinkCreateIter(attr *bpfLinkCreateIterAttr) (*internal.FD, error) { - ptr, err := internal.BPF(internal.BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err != nil { - return nil, err - } - return internal.NewFD(uint32(ptr)), nil -} - -type bpfLinkUpdateAttr struct { - linkFd uint32 - newProgFd uint32 - flags uint32 - oldProgFd uint32 -} - -func bpfLinkUpdate(attr *bpfLinkUpdateAttr) error { - _, err := internal.BPF(internal.BPF_LINK_UPDATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - return err -} - var haveBPFLink = internal.FeatureTest("bpf_link", "5.7", func() error { - prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ - Type: ebpf.CGroupSKB, - AttachType: ebpf.AttachCGroupInetIngress, - License: "MIT", - Instructions: asm.Instructions{ - asm.Mov.Imm(asm.R0, 0), - asm.Return(), - }, - }) - if err != nil { - return internal.ErrNotSupported - } - defer prog.Close() - - attr := bpfLinkCreateAttr{ + attr := sys.LinkCreateAttr{ // This is a hopefully invalid file descriptor, which triggers EBADF. 
- targetFd: ^uint32(0), - progFd: uint32(prog.FD()), - attachType: ebpf.AttachCGroupInetIngress, + TargetFd: ^uint32(0), + ProgFd: ^uint32(0), + AttachType: sys.AttachType(ebpf.AttachCGroupInetIngress), } - _, err = bpfLinkCreate(&attr) + _, err := sys.LinkCreate(&attr) if errors.Is(err, unix.EINVAL) { return internal.ErrNotSupported } @@ -161,30 +101,3 @@ var haveBPFLink = internal.FeatureTest("bpf_link", "5.7", func() error { } return err }) - -type bpfIterCreateAttr struct { - linkFd uint32 - flags uint32 -} - -func bpfIterCreate(attr *bpfIterCreateAttr) (*internal.FD, error) { - ptr, err := internal.BPF(internal.BPF_ITER_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err == nil { - return internal.NewFD(uint32(ptr)), nil - } - return nil, err -} - -type bpfRawTracepointOpenAttr struct { - name internal.Pointer - fd uint32 - _ uint32 -} - -func bpfRawTracepointOpen(attr *bpfRawTracepointOpenAttr) (*internal.FD, error) { - ptr, err := internal.BPF(internal.BPF_RAW_TRACEPOINT_OPEN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - if err == nil { - return internal.NewFD(uint32(ptr)), nil - } - return nil, err -} diff --git a/agent/vendor/github.com/cilium/ebpf/link/tracepoint.go b/agent/vendor/github.com/cilium/ebpf/link/tracepoint.go index b8ae04bf0a0..a59ef9d1c52 100644 --- a/agent/vendor/github.com/cilium/ebpf/link/tracepoint.go +++ b/agent/vendor/github.com/cilium/ebpf/link/tracepoint.go @@ -6,23 +6,37 @@ import ( "github.com/cilium/ebpf" ) +// TracepointOptions defines additional parameters that will be used +// when loading Tracepoints. +type TracepointOptions struct { + // Arbitrary value that can be fetched from an eBPF program + // via `bpf_get_attach_cookie()`. + // + // Needs kernel 5.15+. + Cookie uint64 +} + // Tracepoint attaches the given eBPF program to the tracepoint with the given // group and name. See /sys/kernel/debug/tracing/events to find available // tracepoints. The top-level directory is the group, the event's subdirectory // is the name. Example: // -// Tracepoint("syscalls", "sys_enter_fork", prog) +// tp, err := Tracepoint("syscalls", "sys_enter_fork", prog, nil) +// +// Losing the reference to the resulting Link (tp) will close the Tracepoint +// and prevent further execution of prog. The Link must be Closed during +// program shutdown to avoid leaking system resources. // // Note that attaching eBPF programs to syscalls (sys_enter_*/sys_exit_*) is // only possible as of kernel 4.14 (commit cf5f5ce). 
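Before the signature change below, a hedged end-to-end sketch of the new four-argument Tracepoint form; the trivial program mirrors the placeholder programs used by the feature probes elsewhere in this patch and is not a real tracer:

// Requires "github.com/cilium/ebpf", "github.com/cilium/ebpf/asm" and
// "github.com/cilium/ebpf/link".
func attachForkTracepoint() error {
	// Minimal TracePoint program that immediately returns 0.
	prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
		Type:    ebpf.TracePoint,
		License: "MIT",
		Instructions: asm.Instructions{
			asm.Mov.Imm(asm.R0, 0),
			asm.Return(),
		},
	})
	if err != nil {
		return err
	}
	defer prog.Close()

	// nil options keep the defaults; on 5.15+ a &TracepointOptions{Cookie: ...}
	// value can be read back in the program via bpf_get_attach_cookie().
	tp, err := link.Tracepoint("syscalls", "sys_enter_fork", prog, nil)
	if err != nil {
		return err
	}
	// The tracepoint stays attached only while tp is held open.
	return tp.Close()
}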
-func Tracepoint(group, name string, prog *ebpf.Program) (Link, error) { +func Tracepoint(group, name string, prog *ebpf.Program, opts *TracepointOptions) (Link, error) { if group == "" || name == "" { return nil, fmt.Errorf("group and name cannot be empty: %w", errInvalidInput) } if prog == nil { return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) } - if !rgxTraceEvent.MatchString(group) || !rgxTraceEvent.MatchString(name) { + if !isValidTraceID(group) || !isValidTraceID(name) { return nil, fmt.Errorf("group and name '%s/%s' must be alphanumeric or underscore: %w", group, name, errInvalidInput) } if prog.Type() != ebpf.TracePoint { @@ -34,23 +48,30 @@ func Tracepoint(group, name string, prog *ebpf.Program) (Link, error) { return nil, err } - fd, err := openTracepointPerfEvent(tid) + fd, err := openTracepointPerfEvent(tid, perfAllThreads) if err != nil { return nil, err } + var cookie uint64 + if opts != nil { + cookie = opts.Cookie + } + pe := &perfEvent{ - fd: fd, - tracefsID: tid, + typ: tracepointEvent, group: group, name: name, - typ: tracepointEvent, + tracefsID: tid, + cookie: cookie, + fd: fd, } - if err := pe.attach(prog); err != nil { + lnk, err := attachPerfEvent(pe, prog) + if err != nil { pe.Close() return nil, err } - return pe, nil + return lnk, nil } diff --git a/agent/vendor/github.com/cilium/ebpf/link/tracing.go b/agent/vendor/github.com/cilium/ebpf/link/tracing.go new file mode 100644 index 00000000000..e47e61a3b84 --- /dev/null +++ b/agent/vendor/github.com/cilium/ebpf/link/tracing.go @@ -0,0 +1,141 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal/sys" +) + +type tracing struct { + RawLink +} + +func (f *tracing) Update(new *ebpf.Program) error { + return fmt.Errorf("tracing update: %w", ErrNotSupported) +} + +// AttachFreplace attaches the given eBPF program to the function it replaces. +// +// The program and name can either be provided at link time, or can be provided +// at program load time. If they were provided at load time, they should be nil +// and empty respectively here, as they will be ignored by the kernel. 
+// Examples: +// +// AttachFreplace(dispatcher, "function", replacement) +// AttachFreplace(nil, "", replacement) +func AttachFreplace(targetProg *ebpf.Program, name string, prog *ebpf.Program) (Link, error) { + if (name == "") != (targetProg == nil) { + return nil, fmt.Errorf("must provide both or neither of name and targetProg: %w", errInvalidInput) + } + if prog == nil { + return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) + } + if prog.Type() != ebpf.Extension { + return nil, fmt.Errorf("eBPF program type %s is not an Extension: %w", prog.Type(), errInvalidInput) + } + + var ( + target int + typeID btf.TypeID + ) + if targetProg != nil { + btfHandle, err := targetProg.Handle() + if err != nil { + return nil, err + } + defer btfHandle.Close() + + spec, err := btfHandle.Spec(nil) + if err != nil { + return nil, err + } + + var function *btf.Func + if err := spec.TypeByName(name, &function); err != nil { + return nil, err + } + + target = targetProg.FD() + typeID, err = spec.TypeID(function) + if err != nil { + return nil, err + } + } + + link, err := AttachRawLink(RawLinkOptions{ + Target: target, + Program: prog, + Attach: ebpf.AttachNone, + BTF: typeID, + }) + if err != nil { + return nil, err + } + + return &tracing{*link}, nil +} + +type TracingOptions struct { + // Program must be of type Tracing with attach type + // AttachTraceFEntry/AttachTraceFExit/AttachModifyReturn or + // AttachTraceRawTp. + Program *ebpf.Program +} + +type LSMOptions struct { + // Program must be of type LSM with attach type + // AttachLSMMac. + Program *ebpf.Program +} + +// attachBTFID links all BPF program types (Tracing/LSM) that they attach to a btf_id. +func attachBTFID(program *ebpf.Program) (Link, error) { + if program.FD() < 0 { + return nil, fmt.Errorf("invalid program %w", sys.ErrClosedFd) + } + + fd, err := sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{ + ProgFd: uint32(program.FD()), + }) + if err != nil { + return nil, err + } + + raw := RawLink{fd: fd} + info, err := raw.Info() + if err != nil { + raw.Close() + return nil, err + } + + if info.Type == RawTracepointType { + // Sadness upon sadness: a Tracing program with AttachRawTp returns + // a raw_tracepoint link. Other types return a tracing link. + return &rawTracepoint{raw}, nil + } + + return &tracing{RawLink: RawLink{fd: fd}}, nil +} + +// AttachTracing links a tracing (fentry/fexit/fmod_ret) BPF program or +// a BTF-powered raw tracepoint (tp_btf) BPF Program to a BPF hook defined +// in kernel modules. +func AttachTracing(opts TracingOptions) (Link, error) { + if t := opts.Program.Type(); t != ebpf.Tracing { + return nil, fmt.Errorf("invalid program type %s, expected Tracing", t) + } + + return attachBTFID(opts.Program) +} + +// AttachLSM links a Linux security module (LSM) BPF Program to a BPF +// hook defined in kernel modules. 
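A brief, hedged sketch of the two attach helpers defined in this new file; fentryProg and lsmProg stand in for programs of type Tracing and LSM that are assumed to have been loaded elsewhere (typically from an ELF object):

// Requires "github.com/cilium/ebpf" and "github.com/cilium/ebpf/link".
func attachHooks(fentryProg, lsmProg *ebpf.Program) ([]link.Link, error) {
	// fentry/fexit/fmod_ret and tp_btf programs go through AttachTracing.
	tr, err := link.AttachTracing(link.TracingOptions{Program: fentryProg})
	if err != nil {
		return nil, err
	}

	// LSM programs (attach type AttachLSMMac) go through AttachLSM.
	lsm, err := link.AttachLSM(link.LSMOptions{Program: lsmProg})
	if err != nil {
		tr.Close()
		return nil, err
	}
	return []link.Link{tr, lsm}, nil
}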
+func AttachLSM(opts LSMOptions) (Link, error) { + if t := opts.Program.Type(); t != ebpf.LSM { + return nil, fmt.Errorf("invalid program type %s, expected LSM", t) + } + + return attachBTFID(opts.Program) +} diff --git a/agent/vendor/github.com/cilium/ebpf/link/uprobe.go b/agent/vendor/github.com/cilium/ebpf/link/uprobe.go index 2bc395ee3cd..edf925b5702 100644 --- a/agent/vendor/github.com/cilium/ebpf/link/uprobe.go +++ b/agent/vendor/github.com/cilium/ebpf/link/uprobe.go @@ -6,7 +6,7 @@ import ( "fmt" "os" "path/filepath" - "regexp" + "strings" "sync" "github.com/cilium/ebpf" @@ -16,36 +16,72 @@ import ( var ( uprobeEventsPath = filepath.Join(tracefsPath, "uprobe_events") - // rgxUprobeSymbol is used to strip invalid characters from the uprobe symbol - // as they are not allowed to be used as the EVENT token in tracefs. - rgxUprobeSymbol = regexp.MustCompile("[^a-zA-Z0-9]+") - uprobeRetprobeBit = struct { once sync.Once value uint64 err error }{} + + uprobeRefCtrOffsetPMUPath = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset" + // elixir.bootlin.com/linux/v5.15-rc7/source/kernel/events/core.c#L9799 + uprobeRefCtrOffsetShift = 32 + haveRefCtrOffsetPMU = internal.FeatureTest("RefCtrOffsetPMU", "4.20", func() error { + _, err := os.Stat(uprobeRefCtrOffsetPMUPath) + if err != nil { + return internal.ErrNotSupported + } + return nil + }) + + // ErrNoSymbol indicates that the given symbol was not found + // in the ELF symbols table. + ErrNoSymbol = errors.New("not found") ) // Executable defines an executable program on the filesystem. type Executable struct { // Path of the executable on the filesystem. path string - // Parsed ELF symbols and dynamic symbols. - symbols map[string]elf.Symbol + // Parsed ELF and dynamic symbols' addresses. + addresses map[string]uint64 } // UprobeOptions defines additional parameters that will be used // when loading Uprobes. type UprobeOptions struct { - // Symbol offset. Must be provided in case of external symbols (shared libs). - // If set, overrides the offset eventually parsed from the executable. + // Symbol address. Must be provided in case of external symbols (shared libs). + // If set, overrides the address eventually parsed from the executable. + Address uint64 + // The offset relative to given symbol. Useful when tracing an arbitrary point + // inside the frame of given symbol. + // + // Note: this field changed from being an absolute offset to being relative + // to Address. Offset uint64 + // Only set the uprobe on the given process ID. Useful when tracing + // shared library calls or programs that have many running instances. + PID int + // Automatically manage SDT reference counts (semaphores). + // + // If this field is set, the Kernel will increment/decrement the + // semaphore located in the process memory at the provided address on + // probe attach/detach. + // + // See also: + // sourceware.org/systemtap/wiki/UserSpaceProbeImplementation (Semaphore Handling) + // github.com/torvalds/linux/commit/1cc33161a83d + // github.com/torvalds/linux/commit/a6ca88b241d5 + RefCtrOffset uint64 + // Arbitrary value that can be fetched from an eBPF program + // via `bpf_get_attach_cookie()`. + // + // Needs kernel 5.15+. + Cookie uint64 } // To open a new Executable, use: // -// OpenExecutable("/bin/bash") +// OpenExecutable("/bin/bash") // // The returned value can then be used to open Uprobe(s). 
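As a rough usage sketch of the reworked Executable API: the bash path and readline symbol are illustrative only, and prog is assumed to be a loaded Kprobe-type program.

// Requires "github.com/cilium/ebpf" and "github.com/cilium/ebpf/link".
func probeReadline(prog *ebpf.Program) (link.Link, error) {
	// Parse /bin/bash once; symbol addresses are cached on the Executable.
	ex, err := link.OpenExecutable("/bin/bash")
	if err != nil {
		return nil, err
	}
	// nil options probe the symbol's resolved address in every process;
	// &UprobeOptions{PID: ..., Offset: ..., Cookie: ...} narrows this down.
	return ex.Uprobe("readline", prog, nil)
}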
func OpenExecutable(path string) (*Executable, error) { @@ -64,42 +100,94 @@ func OpenExecutable(path string) (*Executable, error) { return nil, fmt.Errorf("parse ELF file: %w", err) } - var ex = Executable{ - path: path, - symbols: make(map[string]elf.Symbol), + if se.Type != elf.ET_EXEC && se.Type != elf.ET_DYN { + // ELF is not an executable or a shared object. + return nil, errors.New("the given file is not an executable or a shared object") } - if err := ex.addSymbols(se.Symbols); err != nil { - return nil, err + + ex := Executable{ + path: path, + addresses: make(map[string]uint64), } - if err := ex.addSymbols(se.DynamicSymbols); err != nil { + if err := ex.load(se); err != nil { return nil, err } return &ex, nil } -func (ex *Executable) addSymbols(f func() ([]elf.Symbol, error)) error { - // elf.Symbols and elf.DynamicSymbols return ErrNoSymbols if the section is not found. - syms, err := f() +func (ex *Executable) load(f *internal.SafeELFFile) error { + syms, err := f.Symbols() + if err != nil && !errors.Is(err, elf.ErrNoSymbols) { + return err + } + + dynsyms, err := f.DynamicSymbols() if err != nil && !errors.Is(err, elf.ErrNoSymbols) { return err } + + syms = append(syms, dynsyms...) + for _, s := range syms { if elf.ST_TYPE(s.Info) != elf.STT_FUNC { // Symbol not associated with a function or other executable code. continue } - ex.symbols[s.Name] = s + + address := s.Value + + // Loop over ELF segments. + for _, prog := range f.Progs { + // Skip uninteresting segments. + if prog.Type != elf.PT_LOAD || (prog.Flags&elf.PF_X) == 0 { + continue + } + + if prog.Vaddr <= s.Value && s.Value < (prog.Vaddr+prog.Memsz) { + // If the symbol value is contained in the segment, calculate + // the symbol offset. + // + // fn symbol offset = fn symbol VA - .text VA + .text offset + // + // stackoverflow.com/a/40249502 + address = s.Value - prog.Vaddr + prog.Off + break + } + } + + ex.addresses[s.Name] = address } + return nil } -func (ex *Executable) symbol(symbol string) (*elf.Symbol, error) { - if s, ok := ex.symbols[symbol]; ok { - return &s, nil +// address calculates the address of a symbol in the executable. +// +// opts must not be nil. +func (ex *Executable) address(symbol string, opts *UprobeOptions) (uint64, error) { + if opts.Address > 0 { + return opts.Address + opts.Offset, nil } - return nil, fmt.Errorf("symbol %s not found", symbol) + + address, ok := ex.addresses[symbol] + if !ok { + return 0, fmt.Errorf("symbol %s: %w", symbol, ErrNoSymbol) + } + + // Symbols with location 0 from section undef are shared library calls and + // are relocated before the binary is executed. Dynamic linking is not + // implemented by the library, so mark this as unsupported for now. + // + // Since only offset values are stored and not elf.Symbol, if the value is 0, + // assume it's an external symbol. + if address == 0 { + return 0, fmt.Errorf("cannot resolve %s library call '%s': %w "+ + "(consider providing UprobeOptions.Address)", ex.path, symbol, ErrNotSupported) + } + + return address + opts.Offset, nil } // Uprobe attaches the given eBPF program to a perf event that fires when the @@ -112,24 +200,29 @@ func (ex *Executable) symbol(symbol string) (*elf.Symbol, error) { // When using symbols which belongs to shared libraries, // an offset must be provided via options: // -// ex.Uprobe("main", prog, &UprobeOptions{Offset: 0x123}) +// up, err := ex.Uprobe("main", prog, &UprobeOptions{Offset: 0x123}) +// +// Note: Setting the Offset field in the options supersedes the symbol's offset. 
// -// The resulting Link must be Closed during program shutdown to avoid leaking -// system resources. Functions provided by shared libraries can currently not -// be traced and will result in an ErrNotSupported. +// Losing the reference to the resulting Link (up) will close the Uprobe +// and prevent further execution of prog. The Link must be Closed during +// program shutdown to avoid leaking system resources. +// +// Functions provided by shared libraries can currently not be traced and +// will result in an ErrNotSupported. func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) { u, err := ex.uprobe(symbol, prog, opts, false) if err != nil { return nil, err } - err = u.attach(prog) + lnk, err := attachPerfEvent(u, prog) if err != nil { u.Close() return nil, err } - return u, nil + return lnk, nil } // Uretprobe attaches the given eBPF program to a perf event that fires right @@ -141,24 +234,29 @@ func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti // When using symbols which belongs to shared libraries, // an offset must be provided via options: // -// ex.Uretprobe("main", prog, &UprobeOptions{Offset: 0x123}) +// up, err := ex.Uretprobe("main", prog, &UprobeOptions{Offset: 0x123}) +// +// Note: Setting the Offset field in the options supersedes the symbol's offset. +// +// Losing the reference to the resulting Link (up) will close the Uprobe +// and prevent further execution of prog. The Link must be Closed during +// program shutdown to avoid leaking system resources. // -// The resulting Link must be Closed during program shutdown to avoid leaking -// system resources. Functions provided by shared libraries can currently not -// be traced and will result in an ErrNotSupported. +// Functions provided by shared libraries can currently not be traced and +// will result in an ErrNotSupported. func (ex *Executable) Uretprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) { u, err := ex.uprobe(symbol, prog, opts, true) if err != nil { return nil, err } - err = u.attach(prog) + lnk, err := attachPerfEvent(u, prog) if err != nil { u.Close() return nil, err } - return u, nil + return lnk, nil } // uprobe opens a perf event for the given binary/symbol and attaches prog to it. @@ -170,29 +268,38 @@ func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti if prog.Type() != ebpf.Kprobe { return nil, fmt.Errorf("eBPF program type %s is not Kprobe: %w", prog.Type(), errInvalidInput) } + if opts == nil { + opts = &UprobeOptions{} + } - var offset uint64 - if opts != nil && opts.Offset != 0 { - offset = opts.Offset - } else { - sym, err := ex.symbol(symbol) - if err != nil { - return nil, fmt.Errorf("symbol '%s' not found: %w", symbol, err) - } + offset, err := ex.address(symbol, opts) + if err != nil { + return nil, err + } + + pid := opts.PID + if pid == 0 { + pid = perfAllThreads + } - // Symbols with location 0 from section undef are shared library calls and - // are relocated before the binary is executed. Dynamic linking is not - // implemented by the library, so mark this as unsupported for now. 
- if sym.Section == elf.SHN_UNDEF && sym.Value == 0 { - return nil, fmt.Errorf("cannot resolve %s library call '%s', "+ - "consider providing the offset via options: %w", ex.path, symbol, ErrNotSupported) + if opts.RefCtrOffset != 0 { + if err := haveRefCtrOffsetPMU(); err != nil { + return nil, fmt.Errorf("uprobe ref_ctr_offset: %w", err) } + } - offset = sym.Value + args := probeArgs{ + symbol: symbol, + path: ex.path, + offset: offset, + pid: pid, + refCtrOffset: opts.RefCtrOffset, + ret: ret, + cookie: opts.Cookie, } // Use uprobe PMU if the kernel has it available. - tp, err := pmuUprobe(symbol, ex.path, offset, ret) + tp, err := pmuUprobe(args) if err == nil { return tp, nil } @@ -201,7 +308,8 @@ func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti } // Use tracefs if uprobe PMU is missing. - tp, err = tracefsUprobe(uprobeSanitizedSymbol(symbol), ex.path, offset, ret) + args.symbol = sanitizeSymbol(symbol) + tp, err = tracefsUprobe(args) if err != nil { return nil, fmt.Errorf("creating trace event '%s:%s' in tracefs: %w", ex.path, symbol, err) } @@ -210,23 +318,51 @@ func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti } // pmuUprobe opens a perf event based on the uprobe PMU. -func pmuUprobe(symbol, path string, offset uint64, ret bool) (*perfEvent, error) { - return pmuProbe(uprobeType, symbol, path, offset, ret) +func pmuUprobe(args probeArgs) (*perfEvent, error) { + return pmuProbe(uprobeType, args) } // tracefsUprobe creates a Uprobe tracefs entry. -func tracefsUprobe(symbol, path string, offset uint64, ret bool) (*perfEvent, error) { - return tracefsProbe(uprobeType, symbol, path, offset, ret) +func tracefsUprobe(args probeArgs) (*perfEvent, error) { + return tracefsProbe(uprobeType, args) } -// uprobeSanitizedSymbol replaces every invalid characted for the tracefs api with an underscore. -func uprobeSanitizedSymbol(symbol string) string { - return rgxUprobeSymbol.ReplaceAllString(symbol, "_") +// sanitizeSymbol replaces every invalid character for the tracefs api with an underscore. +// It is equivalent to calling regexp.MustCompile("[^a-zA-Z0-9]+").ReplaceAllString("_"). +func sanitizeSymbol(s string) string { + var b strings.Builder + b.Grow(len(s)) + var skip bool + for _, c := range []byte(s) { + switch { + case c >= 'a' && c <= 'z', + c >= 'A' && c <= 'Z', + c >= '0' && c <= '9': + skip = false + b.WriteByte(c) + + default: + if !skip { + b.WriteByte('_') + skip = true + } + } + } + + return b.String() } -// uprobePathOffset creates the PATH:OFFSET token for the tracefs api. -func uprobePathOffset(path string, offset uint64) string { - return fmt.Sprintf("%s:%#x", path, offset) +// uprobeToken creates the PATH:OFFSET(REF_CTR_OFFSET) token for the tracefs api. +func uprobeToken(args probeArgs) string { + po := fmt.Sprintf("%s:%#x", args.path, args.offset) + + if args.refCtrOffset != 0 { + // This is not documented in Documentation/trace/uprobetracer.txt. + // elixir.bootlin.com/linux/v5.15-rc7/source/kernel/trace/trace.c#L5564 + po += fmt.Sprintf("(%#x)", args.refCtrOffset) + } + + return po } func uretprobeBit() (uint64, error) { diff --git a/agent/vendor/github.com/cilium/ebpf/link/xdp.go b/agent/vendor/github.com/cilium/ebpf/link/xdp.go new file mode 100644 index 00000000000..aa8dd3a4cb3 --- /dev/null +++ b/agent/vendor/github.com/cilium/ebpf/link/xdp.go @@ -0,0 +1,54 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" +) + +// XDPAttachFlags represents how XDP program will be attached to interface. 
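A hedged sketch of attaching through the options type defined just below; the interface name is illustrative and prog is assumed to be a loaded ebpf.XDP program:

// Requires "net", "github.com/cilium/ebpf" and "github.com/cilium/ebpf/link".
func attachXDP(prog *ebpf.Program, ifaceName string) (link.Link, error) {
	iface, err := net.InterfaceByName(ifaceName)
	if err != nil {
		return nil, err
	}
	// Leaving Flags zero lets the kernel pick driver or generic mode.
	return link.AttachXDP(link.XDPOptions{
		Program:   prog,
		Interface: iface.Index,
	})
}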
+type XDPAttachFlags uint32 + +const ( + // XDPGenericMode (SKB) links XDP BPF program for drivers which do + // not yet support native XDP. + XDPGenericMode XDPAttachFlags = 1 << (iota + 1) + // XDPDriverMode links XDP BPF program into the driver’s receive path. + XDPDriverMode + // XDPOffloadMode offloads the entire XDP BPF program into hardware. + XDPOffloadMode +) + +type XDPOptions struct { + // Program must be an XDP BPF program. + Program *ebpf.Program + + // Interface is the interface index to attach program to. + Interface int + + // Flags is one of XDPAttachFlags (optional). + // + // Only one XDP mode should be set, without flag defaults + // to driver/generic mode (best effort). + Flags XDPAttachFlags +} + +// AttachXDP links an XDP BPF program to an XDP hook. +func AttachXDP(opts XDPOptions) (Link, error) { + if t := opts.Program.Type(); t != ebpf.XDP { + return nil, fmt.Errorf("invalid program type %s, expected XDP", t) + } + + if opts.Interface < 1 { + return nil, fmt.Errorf("invalid interface index: %d", opts.Interface) + } + + rawLink, err := AttachRawLink(RawLinkOptions{ + Program: opts.Program, + Attach: ebpf.AttachXDP, + Target: opts.Interface, + Flags: uint32(opts.Flags), + }) + + return rawLink, err +} diff --git a/agent/vendor/github.com/cilium/ebpf/linker.go b/agent/vendor/github.com/cilium/ebpf/linker.go index 6c2efef9e42..e6276b1829b 100644 --- a/agent/vendor/github.com/cilium/ebpf/linker.go +++ b/agent/vendor/github.com/cilium/ebpf/linker.go @@ -1,140 +1,238 @@ package ebpf import ( + "errors" "fmt" + "sync" "github.com/cilium/ebpf/asm" - "github.com/cilium/ebpf/internal/btf" + "github.com/cilium/ebpf/btf" ) -// link resolves bpf-to-bpf calls. +// splitSymbols splits insns into subsections delimited by Symbol Instructions. +// insns cannot be empty and must start with a Symbol Instruction. // -// Each library may contain multiple functions / labels, and is only linked -// if prog references one of these functions. -// -// Libraries also linked. -func link(prog *ProgramSpec, libs []*ProgramSpec) error { - var ( - linked = make(map[*ProgramSpec]bool) - pending = []asm.Instructions{prog.Instructions} - insns asm.Instructions - ) - for len(pending) > 0 { - insns, pending = pending[0], pending[1:] - for _, lib := range libs { - if linked[lib] { - continue - } +// The resulting map is indexed by Symbol name. +func splitSymbols(insns asm.Instructions) (map[string]asm.Instructions, error) { + if len(insns) == 0 { + return nil, errors.New("insns is empty") + } - needed, err := needSection(insns, lib.Instructions) - if err != nil { - return fmt.Errorf("linking %s: %w", lib.Name, err) - } + if insns[0].Symbol() == "" { + return nil, errors.New("insns must start with a Symbol") + } - if !needed { - continue + var name string + progs := make(map[string]asm.Instructions) + for _, ins := range insns { + if sym := ins.Symbol(); sym != "" { + if progs[sym] != nil { + return nil, fmt.Errorf("insns contains duplicate Symbol %s", sym) } + name = sym + } + + progs[name] = append(progs[name], ins) + } - linked[lib] = true - prog.Instructions = append(prog.Instructions, lib.Instructions...) - pending = append(pending, lib.Instructions) + return progs, nil +} - if prog.BTF != nil && lib.BTF != nil { - if err := btf.ProgramAppend(prog.BTF, lib.BTF); err != nil { - return fmt.Errorf("linking BTF of %s: %w", lib.Name, err) - } - } +// The linker is responsible for resolving bpf-to-bpf calls between programs +// within an ELF. 
Each BPF program must be a self-contained binary blob, +// so when an instruction in one ELF program section wants to jump to +// a function in another, the linker needs to pull in the bytecode +// (and BTF info) of the target function and concatenate the instruction +// streams. +// +// Later on in the pipeline, all call sites are fixed up with relative jumps +// within this newly-created instruction stream to then finally hand off to +// the kernel with BPF_PROG_LOAD. +// +// Each function is denoted by an ELF symbol and the compiler takes care of +// register setup before each jump instruction. + +// hasFunctionReferences returns true if insns contains one or more bpf2bpf +// function references. +func hasFunctionReferences(insns asm.Instructions) bool { + for _, i := range insns { + if i.IsFunctionReference() { + return true } } - - return nil + return false } -func needSection(insns, section asm.Instructions) (bool, error) { - // A map of symbols to the libraries which contain them. - symbols, err := section.SymbolOffsets() +// applyRelocations collects and applies any CO-RE relocations in insns. +// +// Passing a nil target will relocate against the running kernel. insns are +// modified in place. +func applyRelocations(insns asm.Instructions, local, target *btf.Spec) error { + var relos []*btf.CORERelocation + var reloInsns []*asm.Instruction + iter := insns.Iterate() + for iter.Next() { + if relo := btf.CORERelocationMetadata(iter.Ins); relo != nil { + relos = append(relos, relo) + reloInsns = append(reloInsns, iter.Ins) + } + } + + if len(relos) == 0 { + return nil + } + + target, err := maybeLoadKernelBTF(target) if err != nil { - return false, err + return err } - for _, ins := range insns { - if ins.Reference == "" { - continue - } + fixups, err := btf.CORERelocate(local, target, relos) + if err != nil { + return err + } - if ins.OpCode.JumpOp() != asm.Call || ins.Src != asm.PseudoCall { - continue + for i, fixup := range fixups { + if err := fixup.Apply(reloInsns[i]); err != nil { + return fmt.Errorf("apply fixup %s: %w", &fixup, err) } + } + + return nil +} + +// flattenPrograms resolves bpf-to-bpf calls for a set of programs. +// +// Links all programs in names by modifying their ProgramSpec in progs. +func flattenPrograms(progs map[string]*ProgramSpec, names []string) { + // Pre-calculate all function references. + refs := make(map[*ProgramSpec][]string) + for _, prog := range progs { + refs[prog] = prog.Instructions.FunctionReferences() + } + + // Create a flattened instruction stream, but don't modify progs yet to + // avoid linking multiple times. + flattened := make([]asm.Instructions, 0, len(names)) + for _, name := range names { + flattened = append(flattened, flattenInstructions(name, progs, refs)) + } + + // Finally, assign the flattened instructions. + for i, name := range names { + progs[name].Instructions = flattened[i] + } +} + +// flattenInstructions resolves bpf-to-bpf calls for a single program. +// +// Flattens the instructions of prog by concatenating the instructions of all +// direct and indirect dependencies. +// +// progs contains all referenceable programs, while refs contain the direct +// dependencies of each program. +func flattenInstructions(name string, progs map[string]*ProgramSpec, refs map[*ProgramSpec][]string) asm.Instructions { + prog := progs[name] - if ins.Constant != -1 { - // This is already a valid call, no need to link again. 
+ insns := make(asm.Instructions, len(prog.Instructions)) + copy(insns, prog.Instructions) + + // Add all direct references of prog to the list of to be linked programs. + pending := make([]string, len(refs[prog])) + copy(pending, refs[prog]) + + // All references for which we've appended instructions. + linked := make(map[string]bool) + + // Iterate all pending references. We can't use a range since pending is + // modified in the body below. + for len(pending) > 0 { + var ref string + ref, pending = pending[0], pending[1:] + + if linked[ref] { + // We've already linked this ref, don't append instructions again. continue } - if _, ok := symbols[ins.Reference]; !ok { - // Symbol isn't available in this section + progRef := progs[ref] + if progRef == nil { + // We don't have instructions that go with this reference. This + // happens when calling extern functions. continue } - // At this point we know that at least one function in the - // library is called from insns, so we have to link it. - return true, nil + insns = append(insns, progRef.Instructions...) + linked[ref] = true + + // Make sure we link indirect references. + pending = append(pending, refs[progRef]...) } - // None of the functions in the section are called. - return false, nil + return insns } -func fixupJumpsAndCalls(insns asm.Instructions) error { - symbolOffsets := make(map[string]asm.RawInstructionOffset) +// fixupAndValidate is called by the ELF reader right before marshaling the +// instruction stream. It performs last-minute adjustments to the program and +// runs some sanity checks before sending it off to the kernel. +func fixupAndValidate(insns asm.Instructions) error { iter := insns.Iterate() for iter.Next() { ins := iter.Ins - if ins.Symbol == "" { - continue - } - - if _, ok := symbolOffsets[ins.Symbol]; ok { - return fmt.Errorf("duplicate symbol %s", ins.Symbol) + // Map load was tagged with a Reference, but does not contain a Map pointer. + if ins.IsLoadFromMap() && ins.Reference() != "" && ins.Map() == nil { + return fmt.Errorf("instruction %d: map %s: %w", iter.Index, ins.Reference(), asm.ErrUnsatisfiedMapReference) } - symbolOffsets[ins.Symbol] = iter.Offset + fixupProbeReadKernel(ins) } - iter = insns.Iterate() - for iter.Next() { - i := iter.Index - offset := iter.Offset - ins := iter.Ins + return nil +} - if ins.Reference == "" { - continue - } +// fixupProbeReadKernel replaces calls to bpf_probe_read_{kernel,user}(_str) +// with bpf_probe_read(_str) on kernels that don't support it yet. +func fixupProbeReadKernel(ins *asm.Instruction) { + if !ins.IsBuiltinCall() { + return + } - switch { - case ins.IsFunctionCall() && ins.Constant == -1: - // Rewrite bpf to bpf call - callOffset, ok := symbolOffsets[ins.Reference] - if !ok { - return fmt.Errorf("call at %d: reference to missing symbol %q", i, ins.Reference) - } + // Kernel supports bpf_probe_read_kernel, nothing to do. 
+ if haveProbeReadKernel() == nil { + return + } - ins.Constant = int64(callOffset - offset - 1) + switch asm.BuiltinFunc(ins.Constant) { + case asm.FnProbeReadKernel, asm.FnProbeReadUser: + ins.Constant = int64(asm.FnProbeRead) + case asm.FnProbeReadKernelStr, asm.FnProbeReadUserStr: + ins.Constant = int64(asm.FnProbeReadStr) + } +} - case ins.OpCode.Class() == asm.JumpClass && ins.Offset == -1: - // Rewrite jump to label - jumpOffset, ok := symbolOffsets[ins.Reference] - if !ok { - return fmt.Errorf("jump at %d: reference to missing symbol %q", i, ins.Reference) - } +var kernelBTF struct { + sync.Mutex + spec *btf.Spec +} - ins.Offset = int16(jumpOffset - offset - 1) +// maybeLoadKernelBTF loads the current kernel's BTF if spec is nil, otherwise +// it returns spec unchanged. +// +// The kernel BTF is cached for the lifetime of the process. +func maybeLoadKernelBTF(spec *btf.Spec) (*btf.Spec, error) { + if spec != nil { + return spec, nil + } - case ins.IsLoadFromMap() && ins.MapPtr() == -1: - return fmt.Errorf("map %s: %w", ins.Reference, errUnsatisfiedReference) - } + kernelBTF.Lock() + defer kernelBTF.Unlock() + + if kernelBTF.spec != nil { + return kernelBTF.spec, nil } - return nil + var err error + kernelBTF.spec, err = btf.LoadKernelSpec() + return kernelBTF.spec, err } diff --git a/agent/vendor/github.com/cilium/ebpf/map.go b/agent/vendor/github.com/cilium/ebpf/map.go index f257d88c03e..e4a6c87e924 100644 --- a/agent/vendor/github.com/cilium/ebpf/map.go +++ b/agent/vendor/github.com/cilium/ebpf/map.go @@ -1,15 +1,19 @@ package ebpf import ( + "bytes" "errors" "fmt" "io" + "math/rand" "path/filepath" "reflect" - "strings" + "time" + "unsafe" + "github.com/cilium/ebpf/btf" "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/btf" + "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) @@ -18,7 +22,8 @@ var ( ErrKeyNotExist = errors.New("key does not exist") ErrKeyExist = errors.New("key already exists") ErrIterationAborted = errors.New("iteration aborted") - ErrMapIncompatible = errors.New("map's spec is incompatible with pinned map") + ErrMapIncompatible = errors.New("map spec is incompatible with existing map") + errMapNoBTFValue = errors.New("map spec does not contain a BTF Value") ) // MapOptions control loading a map into the kernel. @@ -65,8 +70,16 @@ type MapSpec struct { // InnerMap is used as a template for ArrayOfMaps and HashOfMaps InnerMap *MapSpec + // Extra trailing bytes found in the ELF map definition when using structs + // larger than libbpf's bpf_map_def. nil if no trailing bytes were present. + // Must be nil or empty before instantiating the MapSpec into a Map. + Extra *bytes.Reader + + // The key and value type of this map. May be nil. + Key, Value btf.Type + // The BTF associated with this map. - BTF *btf.Map + BTF *btf.Spec } func (ms *MapSpec) String() string { @@ -82,12 +95,21 @@ func (ms *MapSpec) Copy() *MapSpec { } cpy := *ms + cpy.Contents = make([]MapKV, len(ms.Contents)) copy(cpy.Contents, ms.Contents) + cpy.InnerMap = ms.InnerMap.Copy() + return &cpy } +// hasBTF returns true if the MapSpec has a valid BTF spec and if its +// map type supports associated BTF metadata in the kernel. 
+func (ms *MapSpec) hasBTF() bool { + return ms.BTF != nil && ms.Type.hasBTF() +} + func (ms *MapSpec) clampPerfEventArraySize() error { if ms.Type != PerfEventArray { return nil @@ -105,6 +127,31 @@ func (ms *MapSpec) clampPerfEventArraySize() error { return nil } +// dataSection returns the contents and BTF Datasec descriptor of the spec. +func (ms *MapSpec) dataSection() ([]byte, *btf.Datasec, error) { + + if ms.Value == nil { + return nil, nil, errMapNoBTFValue + } + + ds, ok := ms.Value.(*btf.Datasec) + if !ok { + return nil, nil, fmt.Errorf("map value BTF is a %T, not a *btf.Datasec", ms.Value) + } + + if n := len(ms.Contents); n != 1 { + return nil, nil, fmt.Errorf("expected one key, found %d", n) + } + + kv := ms.Contents[0] + value, ok := kv.Value.([]byte) + if !ok { + return nil, nil, fmt.Errorf("value at first map key is %T, not []byte", kv.Value) + } + + return value, ds, nil +} + // MapKV is used to initialize the contents of a Map. type MapKV struct { Key interface{} @@ -122,7 +169,8 @@ func (ms *MapSpec) checkCompatibility(m *Map) error { case m.valueSize != ms.ValueSize: return fmt.Errorf("expected value size %v, got %v: %w", ms.ValueSize, m.valueSize, ErrMapIncompatible) - case m.maxEntries != ms.MaxEntries: + case !(ms.Type == PerfEventArray && ms.MaxEntries == 0) && + m.maxEntries != ms.MaxEntries: return fmt.Errorf("expected max entries %v, got %v: %w", ms.MaxEntries, m.maxEntries, ErrMapIncompatible) case m.flags != ms.Flags: @@ -142,7 +190,7 @@ func (ms *MapSpec) checkCompatibility(m *Map) error { // if you require custom encoding. type Map struct { name string - fd *internal.FD + fd *sys.FD typ MapType keySize uint32 valueSize uint32 @@ -157,18 +205,19 @@ type Map struct { // // You should not use fd after calling this function. func NewMapFromFD(fd int) (*Map, error) { - if fd < 0 { - return nil, errors.New("invalid fd") + f, err := sys.NewFD(fd) + if err != nil { + return nil, err } - return newMapFromFD(internal.NewFD(uint32(fd))) + return newMapFromFD(f) } -func newMapFromFD(fd *internal.FD) (*Map, error) { +func newMapFromFD(fd *sys.FD) (*Map, error) { info, err := newMapInfoFromFd(fd) if err != nil { fd.Close() - return nil, fmt.Errorf("get map info: %s", err) + return nil, fmt.Errorf("get map info: %w", err) } return newMap(fd, info.Name, info.Type, info.KeySize, info.ValueSize, info.MaxEntries, info.Flags) @@ -188,14 +237,24 @@ func NewMap(spec *MapSpec) (*Map, error) { // // The caller is responsible for ensuring the process' rlimit is set // sufficiently high for locking memory during map creation. This can be done -// by calling unix.Setrlimit with unix.RLIMIT_MEMLOCK prior to calling NewMapWithOptions. +// by calling rlimit.RemoveMemlock() prior to calling NewMapWithOptions. // // May return an error wrapping ErrMapIncompatible. 
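A minimal, hedged example of creating a map through the options-based constructor documented above; the spec values are arbitrary, and the rlimit call follows the hint in the updated doc comment:

// Requires "github.com/cilium/ebpf" and "github.com/cilium/ebpf/rlimit".
func newExampleMap() (*ebpf.Map, error) {
	// Lift the memlock rlimit on older kernels, as the doc comment suggests.
	if err := rlimit.RemoveMemlock(); err != nil {
		return nil, err
	}
	return ebpf.NewMapWithOptions(&ebpf.MapSpec{
		Name:       "example_hash",
		Type:       ebpf.Hash,
		KeySize:    4, // uint32 keys
		ValueSize:  8, // uint64 values
		MaxEntries: 64,
	}, ebpf.MapOptions{})
}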
func NewMapWithOptions(spec *MapSpec, opts MapOptions) (*Map, error) { handles := newHandleCache() defer handles.close() - return newMapWithOptions(spec, opts, handles) + m, err := newMapWithOptions(spec, opts, handles) + if err != nil { + return nil, fmt.Errorf("creating map: %w", err) + } + + if err := m.finalize(spec); err != nil { + m.Close() + return nil, fmt.Errorf("populating map: %w", err) + } + + return m, nil } func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ *Map, err error) { @@ -207,8 +266,12 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ switch spec.Pinning { case PinByName: - if spec.Name == "" || opts.PinPath == "" { - return nil, fmt.Errorf("pin by name: missing Name or PinPath") + if spec.Name == "" { + return nil, fmt.Errorf("pin by name: missing Name") + } + + if opts.PinPath == "" { + return nil, fmt.Errorf("pin by name: missing MapOptions.PinPath") } path := filepath.Join(opts.PinPath, spec.Name) @@ -234,7 +297,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ return nil, fmt.Errorf("pin type %d: %w", int(spec.Pinning), ErrNotSupported) } - var innerFd *internal.FD + var innerFd *sys.FD if spec.Type == ArrayOfMaps || spec.Type == HashOfMaps { if spec.InnerMap == nil { return nil, fmt.Errorf("%s requires InnerMap", spec.Type) @@ -244,16 +307,19 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ return nil, errors.New("inner maps cannot be pinned") } - template, err := createMap(spec.InnerMap, nil, opts, handles) + template, err := spec.InnerMap.createMap(nil, opts, handles) if err != nil { - return nil, err + return nil, fmt.Errorf("inner map: %w", err) } defer template.Close() + // Intentionally skip populating and freezing (finalizing) + // the inner map template since it will be removed shortly. + innerFd = template.fd } - m, err := createMap(spec, innerFd, opts, handles) + m, err := spec.createMap(innerFd, opts, handles) if err != nil { return nil, err } @@ -262,14 +328,16 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ if spec.Pinning == PinByName { path := filepath.Join(opts.PinPath, spec.Name) if err := m.Pin(path); err != nil { - return nil, fmt.Errorf("pin map: %s", err) + return nil, fmt.Errorf("pin map: %w", err) } } return m, nil } -func createMap(spec *MapSpec, inner *internal.FD, opts MapOptions, handles *handleCache) (_ *Map, err error) { +// createMap validates the spec's properties and creates the map in the kernel +// using the given opts. It does not populate or freeze the map. +func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions, handles *handleCache) (_ *Map, err error) { closeOnError := func(closer io.Closer) { if err != nil { closer.Close() @@ -278,10 +346,18 @@ func createMap(spec *MapSpec, inner *internal.FD, opts MapOptions, handles *hand spec = spec.Copy() + // Kernels 4.13 through 5.4 used a struct bpf_map_def that contained + // additional 'inner_map_idx' and later 'numa_node' fields. + // In order to support loading these definitions, tolerate the presence of + // extra bytes, but require them to be zeroes. 
+ if spec.Extra != nil { + if _, err := io.Copy(internal.DiscardZeroes{}, spec.Extra); err != nil { + return nil, errors.New("extra contains unhandled non-zero bytes, drain before creating map") + } + } + switch spec.Type { - case ArrayOfMaps: - fallthrough - case HashOfMaps: + case ArrayOfMaps, HashOfMaps: if err := haveNestedMaps(); err != nil { return nil, err } @@ -326,51 +402,63 @@ func createMap(spec *MapSpec, inner *internal.FD, opts MapOptions, handles *hand return nil, fmt.Errorf("map create: %w", err) } } + if spec.Flags&unix.BPF_F_NO_PREALLOC > 0 { + if err := haveNoPreallocMaps(); err != nil { + return nil, fmt.Errorf("map create: %w", err) + } + } - attr := internal.BPFMapCreateAttr{ - MapType: uint32(spec.Type), + attr := sys.MapCreateAttr{ + MapType: sys.MapType(spec.Type), KeySize: spec.KeySize, ValueSize: spec.ValueSize, MaxEntries: spec.MaxEntries, - Flags: spec.Flags, + MapFlags: spec.Flags, NumaNode: spec.NumaNode, } if inner != nil { - var err error - attr.InnerMapFd, err = inner.Value() - if err != nil { - return nil, fmt.Errorf("map create: %w", err) - } + attr.InnerMapFd = inner.Uint() } if haveObjName() == nil { - attr.MapName = internal.NewBPFObjName(spec.Name) + attr.MapName = sys.NewObjName(spec.Name) } - var btfDisabled bool - if spec.BTF != nil { - handle, err := handles.btfHandle(btf.MapSpec(spec.BTF)) - btfDisabled = errors.Is(err, btf.ErrNotSupported) - if err != nil && !btfDisabled { + if spec.hasBTF() { + handle, err := handles.btfHandle(spec.BTF) + if err != nil && !errors.Is(err, btf.ErrNotSupported) { return nil, fmt.Errorf("load BTF: %w", err) } if handle != nil { - attr.BTFFd = uint32(handle.FD()) - attr.BTFKeyTypeID = uint32(btf.MapKey(spec.BTF).ID()) - attr.BTFValueTypeID = uint32(btf.MapValue(spec.BTF).ID()) + keyTypeID, err := spec.BTF.TypeID(spec.Key) + if err != nil { + return nil, err + } + + valueTypeID, err := spec.BTF.TypeID(spec.Value) + if err != nil { + return nil, err + } + + attr.BtfFd = uint32(handle.FD()) + attr.BtfKeyTypeId = uint32(keyTypeID) + attr.BtfValueTypeId = uint32(valueTypeID) } } - fd, err := internal.BPFMapCreate(&attr) + fd, err := sys.MapCreate(&attr) if err != nil { if errors.Is(err, unix.EPERM) { - return nil, fmt.Errorf("map create: RLIMIT_MEMLOCK may be too low: %w", err) + return nil, fmt.Errorf("map create: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err) } - if btfDisabled { + if !spec.hasBTF() { return nil, fmt.Errorf("map create without BTF: %w", err) } + if errors.Is(err, unix.EINVAL) && attr.MaxEntries == 0 { + return nil, fmt.Errorf("map create: %w (MaxEntries may be incorrectly set to zero)", err) + } return nil, fmt.Errorf("map create: %w", err) } defer closeOnError(fd) @@ -380,20 +468,12 @@ func createMap(spec *MapSpec, inner *internal.FD, opts MapOptions, handles *hand return nil, fmt.Errorf("map create: %w", err) } - if err := m.populate(spec.Contents); err != nil { - return nil, fmt.Errorf("map create: can't set initial contents: %w", err) - } - - if spec.Freeze { - if err := m.Freeze(); err != nil { - return nil, fmt.Errorf("can't freeze map: %w", err) - } - } - return m, nil } -func newMap(fd *internal.FD, name string, typ MapType, keySize, valueSize, maxEntries, flags uint32) (*Map, error) { +// newMap allocates and returns a new Map structure. +// Sets the fullValueSize on per-CPU maps. 
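To make the per-CPU sizing performed by newMap easier to follow, a small standalone calculation; the align helper here only mirrors what internal.Align does and is not the vendored implementation:

// align rounds n up to the next multiple of alignment.
func align(n, alignment int) int {
	return (n + alignment - 1) / alignment * alignment
}

// For a per-CPU map, each value is padded to 8 bytes and one slot is
// reserved per possible CPU: a 6-byte value on a machine with 4 possible
// CPUs needs align(6, 8) * 4 = 32 bytes of user-space lookup buffer.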
+func newMap(fd *sys.FD, name string, typ MapType, keySize, valueSize, maxEntries, flags uint32) (*Map, error) { m := &Map{ name, fd, @@ -415,7 +495,7 @@ func newMap(fd *internal.FD, name string, typ MapType, keySize, valueSize, maxEn return nil, err } - m.fullValueSize = align(int(valueSize), 8) * possibleCPUs + m.fullValueSize = internal.Align(int(valueSize), 8) * possibleCPUs return m, nil } @@ -456,6 +536,12 @@ func (m *Map) Info() (*MapInfo, error) { return newMapInfoFromFd(m.fd) } +// MapLookupFlags controls the behaviour of the map lookup calls. +type MapLookupFlags uint64 + +// LookupLock look up the value of a spin-locked map. +const LookupLock MapLookupFlags = 4 + // Lookup retrieves a value from a Map. // // Calls Close() on valueOut if it is of type **Map or **Program, @@ -464,39 +550,58 @@ func (m *Map) Info() (*MapInfo, error) { // Returns an error if the key doesn't exist, see ErrKeyNotExist. func (m *Map) Lookup(key, valueOut interface{}) error { valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize) - if err := m.lookup(key, valuePtr); err != nil { + if err := m.lookup(key, valuePtr, 0); err != nil { return err } return m.unmarshalValue(valueOut, valueBytes) } -// LookupAndDelete retrieves and deletes a value from a Map. +// LookupWithFlags retrieves a value from a Map with flags. // -// Returns ErrKeyNotExist if the key doesn't exist. -func (m *Map) LookupAndDelete(key, valueOut interface{}) error { +// Passing LookupLock flag will look up the value of a spin-locked +// map without returning the lock. This must be specified if the +// elements contain a spinlock. +// +// Calls Close() on valueOut if it is of type **Map or **Program, +// and *valueOut is not nil. +// +// Returns an error if the key doesn't exist, see ErrKeyNotExist. +func (m *Map) LookupWithFlags(key, valueOut interface{}, flags MapLookupFlags) error { valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize) - - keyPtr, err := m.marshalKey(key) - if err != nil { - return fmt.Errorf("can't marshal key: %w", err) - } - - if err := bpfMapLookupAndDelete(m.fd, keyPtr, valuePtr); err != nil { - return fmt.Errorf("lookup and delete failed: %w", err) + if err := m.lookup(key, valuePtr, flags); err != nil { + return err } return m.unmarshalValue(valueOut, valueBytes) } +// LookupAndDelete retrieves and deletes a value from a Map. +// +// Returns ErrKeyNotExist if the key doesn't exist. +func (m *Map) LookupAndDelete(key, valueOut interface{}) error { + return m.lookupAndDelete(key, valueOut, 0) +} + +// LookupAndDeleteWithFlags retrieves and deletes a value from a Map. +// +// Passing LookupLock flag will look up and delete the value of a spin-locked +// map without returning the lock. This must be specified if the elements +// contain a spinlock. +// +// Returns ErrKeyNotExist if the key doesn't exist. +func (m *Map) LookupAndDeleteWithFlags(key, valueOut interface{}, flags MapLookupFlags) error { + return m.lookupAndDelete(key, valueOut, flags) +} + // LookupBytes gets a value from Map. // // Returns a nil value if a key doesn't exist. 
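A short, hedged round-trip through the lookup and update helpers above, against a map assumed to hold uint32 keys and uint64 values (the key and value are arbitrary):

// Requires "github.com/cilium/ebpf".
func roundTrip(m *ebpf.Map) error {
	var (
		key   uint32 = 1
		value uint64
	)
	// Put marshals plain fixed-size keys and values directly.
	if err := m.Put(key, uint64(42)); err != nil {
		return err
	}
	// Lookup writes the stored value into the pointed-to variable.
	if err := m.Lookup(key, &value); err != nil {
		return err
	}
	// Delete removes the entry again.
	return m.Delete(key)
}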
func (m *Map) LookupBytes(key interface{}) ([]byte, error) { valueBytes := make([]byte, m.fullValueSize) - valuePtr := internal.NewSlicePointer(valueBytes) + valuePtr := sys.NewSlicePointer(valueBytes) - err := m.lookup(key, valuePtr) + err := m.lookup(key, valuePtr, 0) if errors.Is(err, ErrKeyNotExist) { return nil, nil } @@ -504,18 +609,47 @@ func (m *Map) LookupBytes(key interface{}) ([]byte, error) { return valueBytes, err } -func (m *Map) lookup(key interface{}, valueOut internal.Pointer) error { +func (m *Map) lookup(key interface{}, valueOut sys.Pointer, flags MapLookupFlags) error { keyPtr, err := m.marshalKey(key) if err != nil { return fmt.Errorf("can't marshal key: %w", err) } - if err = bpfMapLookupElem(m.fd, keyPtr, valueOut); err != nil { - return fmt.Errorf("lookup failed: %w", err) + attr := sys.MapLookupElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + Value: valueOut, + Flags: uint64(flags), + } + + if err = sys.MapLookupElem(&attr); err != nil { + return fmt.Errorf("lookup: %w", wrapMapError(err)) } return nil } +func (m *Map) lookupAndDelete(key, valueOut interface{}, flags MapLookupFlags) error { + valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize) + + keyPtr, err := m.marshalKey(key) + if err != nil { + return fmt.Errorf("can't marshal key: %w", err) + } + + attr := sys.MapLookupAndDeleteElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + Value: valuePtr, + Flags: uint64(flags), + } + + if err := sys.MapLookupAndDeleteElem(&attr); err != nil { + return fmt.Errorf("lookup and delete: %w", wrapMapError(err)) + } + + return m.unmarshalValue(valueOut, valueBytes) +} + // MapUpdateFlags controls the behaviour of the Map.Update call. // // The exact semantics depend on the specific MapType. @@ -528,6 +662,8 @@ const ( UpdateNoExist MapUpdateFlags = 1 << (iota - 1) // UpdateExist updates an existing element. UpdateExist + // UpdateLock updates elements under bpf_spin_lock. + UpdateLock ) // Put replaces or creates a value in map. @@ -549,8 +685,15 @@ func (m *Map) Update(key, value interface{}, flags MapUpdateFlags) error { return fmt.Errorf("can't marshal value: %w", err) } - if err = bpfMapUpdateElem(m.fd, keyPtr, valuePtr, uint64(flags)); err != nil { - return fmt.Errorf("update failed: %w", err) + attr := sys.MapUpdateElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + Value: valuePtr, + Flags: uint64(flags), + } + + if err = sys.MapUpdateElem(&attr); err != nil { + return fmt.Errorf("update: %w", wrapMapError(err)) } return nil @@ -565,8 +708,13 @@ func (m *Map) Delete(key interface{}) error { return fmt.Errorf("can't marshal key: %w", err) } - if err = bpfMapDeleteElem(m.fd, keyPtr); err != nil { - return fmt.Errorf("delete failed: %w", err) + attr := sys.MapDeleteElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + } + + if err = sys.MapDeleteElem(&attr); err != nil { + return fmt.Errorf("delete: %w", wrapMapError(err)) } return nil } @@ -598,7 +746,7 @@ func (m *Map) NextKey(key, nextKeyOut interface{}) error { // Returns nil if there are no more keys. 
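A hedged sketch of key iteration with NextKey, terminating on ErrKeyNotExist as the surrounding code does (again assuming uint32 keys):

// Requires "errors" and "github.com/cilium/ebpf".
func allKeys(m *ebpf.Map) ([]uint32, error) {
	var (
		keys []uint32
		cur  interface{} // nil starts iteration at the first key
		next uint32
	)
	for {
		err := m.NextKey(cur, &next)
		if errors.Is(err, ebpf.ErrKeyNotExist) {
			return keys, nil // no more keys
		}
		if err != nil {
			return nil, err
		}
		keys = append(keys, next)
		cur = next
	}
}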
func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) { nextKey := make([]byte, m.keySize) - nextKeyPtr := internal.NewSlicePointer(nextKey) + nextKeyPtr := sys.NewSlicePointer(nextKey) err := m.nextKey(key, nextKeyPtr) if errors.Is(err, ErrKeyNotExist) { @@ -608,9 +756,9 @@ func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) { return nextKey, err } -func (m *Map) nextKey(key interface{}, nextKeyOut internal.Pointer) error { +func (m *Map) nextKey(key interface{}, nextKeyOut sys.Pointer) error { var ( - keyPtr internal.Pointer + keyPtr sys.Pointer err error ) @@ -621,12 +769,77 @@ func (m *Map) nextKey(key interface{}, nextKeyOut internal.Pointer) error { } } - if err = bpfMapGetNextKey(m.fd, keyPtr, nextKeyOut); err != nil { - return fmt.Errorf("next key failed: %w", err) + attr := sys.MapGetNextKeyAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + NextKey: nextKeyOut, } + + if err = sys.MapGetNextKey(&attr); err != nil { + // Kernels 4.4.131 and earlier return EFAULT instead of a pointer to the + // first map element when a nil key pointer is specified. + if key == nil && errors.Is(err, unix.EFAULT) { + var guessKey []byte + guessKey, err = m.guessNonExistentKey() + if err != nil { + return err + } + + // Retry the syscall with a valid non-existing key. + attr.Key = sys.NewSlicePointer(guessKey) + if err = sys.MapGetNextKey(&attr); err == nil { + return nil + } + } + + return fmt.Errorf("next key: %w", wrapMapError(err)) + } + return nil } +// guessNonExistentKey attempts to perform a map lookup that returns ENOENT. +// This is necessary on kernels before 4.4.132, since those don't support +// iterating maps from the start by providing an invalid key pointer. +func (m *Map) guessNonExistentKey() ([]byte, error) { + // Provide an invalid value pointer to prevent a copy on the kernel side. + valuePtr := sys.NewPointer(unsafe.Pointer(^uintptr(0))) + randKey := make([]byte, int(m.keySize)) + + for i := 0; i < 4; i++ { + switch i { + // For hash maps, the 0 key is less likely to be occupied. They're often + // used for storing data related to pointers, and their access pattern is + // generally scattered across the keyspace. + case 0: + // An all-0xff key is guaranteed to be out of bounds of any array, since + // those have a fixed key size of 4 bytes. The only corner case being + // arrays with 2^32 max entries, but those are prohibitively expensive + // in many environments. + case 1: + for r := range randKey { + randKey[r] = 0xff + } + // Inspired by BCC, 0x55 is an alternating binary pattern (0101), so + // is unlikely to be taken. + case 2: + for r := range randKey { + randKey[r] = 0x55 + } + // Last ditch effort, generate a random key. + case 3: + rand.New(rand.NewSource(time.Now().UnixNano())).Read(randKey) + } + + err := m.lookup(randKey, valuePtr, 0) + if errors.Is(err, ErrKeyNotExist) { + return randKey, nil + } + } + + return nil, errors.New("couldn't find non-existing key") +} + // BatchLookup looks up many elements in a map at once. // // "keysOut" and "valuesOut" must be of type slice, a pointer @@ -638,7 +851,7 @@ func (m *Map) nextKey(key interface{}, nextKeyOut internal.Pointer) error { // the end of all possible results, even when partial results // are returned. It should be used to evaluate when lookup is "done". 
func (m *Map) BatchLookup(prevKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { - return m.batchLookup(internal.BPF_MAP_LOOKUP_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts) + return m.batchLookup(sys.BPF_MAP_LOOKUP_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts) } // BatchLookupAndDelete looks up many elements in a map at once, @@ -653,10 +866,10 @@ func (m *Map) BatchLookup(prevKey, nextKeyOut, keysOut, valuesOut interface{}, o // the end of all possible results, even when partial results // are returned. It should be used to evaluate when lookup is "done". func (m *Map) BatchLookupAndDelete(prevKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { - return m.batchLookup(internal.BPF_MAP_LOOKUP_AND_DELETE_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts) + return m.batchLookup(sys.BPF_MAP_LOOKUP_AND_DELETE_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts) } -func (m *Map) batchLookup(cmd internal.BPFCmd, startKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { +func (m *Map) batchLookup(cmd sys.Cmd, startKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { if err := haveBatchAPI(); err != nil { return 0, err } @@ -676,29 +889,36 @@ func (m *Map) batchLookup(cmd internal.BPFCmd, startKey, nextKeyOut, keysOut, va return 0, fmt.Errorf("keysOut and valuesOut must be the same length") } keyBuf := make([]byte, count*int(m.keySize)) - keyPtr := internal.NewSlicePointer(keyBuf) + keyPtr := sys.NewSlicePointer(keyBuf) valueBuf := make([]byte, count*int(m.fullValueSize)) - valuePtr := internal.NewSlicePointer(valueBuf) + valuePtr := sys.NewSlicePointer(valueBuf) + nextPtr, nextBuf := makeBuffer(nextKeyOut, int(m.keySize)) - var ( - startPtr internal.Pointer - err error - retErr error - ) + attr := sys.MapLookupBatchAttr{ + MapFd: m.fd.Uint(), + Keys: keyPtr, + Values: valuePtr, + Count: uint32(count), + OutBatch: nextPtr, + } + + if opts != nil { + attr.ElemFlags = opts.ElemFlags + attr.Flags = opts.Flags + } + + var err error if startKey != nil { - startPtr, err = marshalPtr(startKey, int(m.keySize)) + attr.InBatch, err = marshalPtr(startKey, int(m.keySize)) if err != nil { return 0, err } } - nextPtr, nextBuf := makeBuffer(nextKeyOut, int(m.keySize)) - ct, err := bpfMapBatch(cmd, m.fd, startPtr, nextPtr, keyPtr, valuePtr, uint32(count), opts) - if err != nil { - if !errors.Is(err, ErrKeyNotExist) { - return 0, err - } - retErr = ErrKeyNotExist + _, sysErr := sys.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) + sysErr = wrapMapError(sysErr) + if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) { + return 0, sysErr } err = m.unmarshalKey(nextKeyOut, nextBuf) @@ -711,9 +931,10 @@ func (m *Map) batchLookup(cmd internal.BPFCmd, startKey, nextKeyOut, keysOut, va } err = unmarshalBytes(valuesOut, valueBuf) if err != nil { - retErr = err + return 0, err } - return int(ct), retErr + + return int(attr.Count), sysErr } // BatchUpdate updates the map with multiple keys and values @@ -737,7 +958,7 @@ func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, er } var ( count = keysValue.Len() - valuePtr internal.Pointer + valuePtr sys.Pointer err error ) if count != valuesValue.Len() { @@ -751,9 +972,24 @@ func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, er if err != nil { return 0, err } - var nilPtr internal.Pointer - ct, err := bpfMapBatch(internal.BPF_MAP_UPDATE_BATCH, m.fd, nilPtr, nilPtr, keyPtr, 
valuePtr, uint32(count), opts) - return int(ct), err + + attr := sys.MapUpdateBatchAttr{ + MapFd: m.fd.Uint(), + Keys: keyPtr, + Values: valuePtr, + Count: uint32(count), + } + if opts != nil { + attr.ElemFlags = opts.ElemFlags + attr.Flags = opts.Flags + } + + err = sys.MapUpdateBatch(&attr) + if err != nil { + return int(attr.Count), fmt.Errorf("batch update: %w", wrapMapError(err)) + } + + return int(attr.Count), nil } // BatchDelete batch deletes entries in the map by keys. @@ -774,9 +1010,23 @@ func (m *Map) BatchDelete(keys interface{}, opts *BatchOptions) (int, error) { if err != nil { return 0, fmt.Errorf("cannot marshal keys: %v", err) } - var nilPtr internal.Pointer - ct, err := bpfMapBatch(internal.BPF_MAP_DELETE_BATCH, m.fd, nilPtr, nilPtr, keyPtr, nilPtr, uint32(count), opts) - return int(ct), err + + attr := sys.MapDeleteBatchAttr{ + MapFd: m.fd.Uint(), + Keys: keyPtr, + Count: uint32(count), + } + + if opts != nil { + attr.ElemFlags = opts.ElemFlags + attr.Flags = opts.Flags + } + + if err = sys.MapDeleteBatch(&attr); err != nil { + return int(attr.Count), fmt.Errorf("batch delete: %w", wrapMapError(err)) + } + + return int(attr.Count), nil } // Iterate traverses a map. @@ -789,7 +1039,8 @@ func (m *Map) Iterate() *MapIterator { return newMapIterator(m) } -// Close removes a Map +// Close the Map's underlying file descriptor, which could unload the +// Map from the kernel if it is not pinned or in use by a loaded Program. func (m *Map) Close() error { if m == nil { // This makes it easier to clean up when iterating maps @@ -804,14 +1055,7 @@ func (m *Map) Close() error { // // Calling this function is invalid after Close has been called. func (m *Map) FD() int { - fd, err := m.fd.Value() - if err != nil { - // Best effort: -1 is the number most likely to be an - // invalid file descriptor. - return -1 - } - - return int(fd) + return m.fd.Int() } // Clone creates a duplicate of the Map. @@ -886,28 +1130,41 @@ func (m *Map) Freeze() error { return fmt.Errorf("can't freeze map: %w", err) } - if err := bpfMapFreeze(m.fd); err != nil { + attr := sys.MapFreezeAttr{ + MapFd: m.fd.Uint(), + } + + if err := sys.MapFreeze(&attr); err != nil { return fmt.Errorf("can't freeze map: %w", err) } return nil } -func (m *Map) populate(contents []MapKV) error { - for _, kv := range contents { +// finalize populates the Map according to the Contents specified +// in spec and freezes the Map if requested by spec. +func (m *Map) finalize(spec *MapSpec) error { + for _, kv := range spec.Contents { if err := m.Put(kv.Key, kv.Value); err != nil { - return fmt.Errorf("key %v: %w", kv.Key, err) + return fmt.Errorf("putting value: key %v: %w", kv.Key, err) } } + + if spec.Freeze { + if err := m.Freeze(); err != nil { + return fmt.Errorf("freezing map: %w", err) + } + } + return nil } -func (m *Map) marshalKey(data interface{}) (internal.Pointer, error) { +func (m *Map) marshalKey(data interface{}) (sys.Pointer, error) { if data == nil { if m.keySize == 0 { // Queues have a key length of zero, so passing nil here is valid. 
- return internal.NewPointer(nil), nil + return sys.NewPointer(nil), nil } - return internal.Pointer{}, errors.New("can't use nil as key of map") + return sys.Pointer{}, errors.New("can't use nil as key of map") } return marshalPtr(data, int(m.keySize)) @@ -922,7 +1179,7 @@ func (m *Map) unmarshalKey(data interface{}, buf []byte) error { return unmarshalBytes(data, buf) } -func (m *Map) marshalValue(data interface{}) (internal.Pointer, error) { +func (m *Map) marshalValue(data interface{}) (sys.Pointer, error) { if m.typ.hasPerCPUValue() { return marshalPerCPUValue(data, int(m.valueSize)) } @@ -935,13 +1192,13 @@ func (m *Map) marshalValue(data interface{}) (internal.Pointer, error) { switch value := data.(type) { case *Map: if !m.typ.canStoreMap() { - return internal.Pointer{}, fmt.Errorf("can't store map in %s", m.typ) + return sys.Pointer{}, fmt.Errorf("can't store map in %s", m.typ) } buf, err = marshalMap(value, int(m.valueSize)) case *Program: if !m.typ.canStoreProgram() { - return internal.Pointer{}, fmt.Errorf("can't store program in %s", m.typ) + return sys.Pointer{}, fmt.Errorf("can't store program in %s", m.typ) } buf, err = marshalProgram(value, int(m.valueSize)) @@ -950,10 +1207,10 @@ func (m *Map) marshalValue(data interface{}) (internal.Pointer, error) { } if err != nil { - return internal.Pointer{}, err + return sys.Pointer{}, err } - return internal.NewSlicePointer(buf), nil + return sys.NewSlicePointer(buf), nil } func (m *Map) unmarshalValue(value interface{}, buf []byte) error { @@ -1017,7 +1274,10 @@ func (m *Map) unmarshalValue(value interface{}, buf []byte) error { // LoadPinnedMap loads a Map from a BPF file. func LoadPinnedMap(fileName string, opts *LoadPinOptions) (*Map, error) { - fd, err := internal.BPFObjGet(fileName, opts.Marshal()) + fd, err := sys.ObjGet(&sys.ObjGetAttr{ + Pathname: sys.NewStringPointer(fileName), + FileFlags: opts.Marshal(), + }) if err != nil { return nil, err } @@ -1046,70 +1306,11 @@ func marshalMap(m *Map, length int) ([]byte, error) { return nil, fmt.Errorf("can't marshal map to %d bytes", length) } - fd, err := m.fd.Value() - if err != nil { - return nil, err - } - buf := make([]byte, 4) - internal.NativeEndian.PutUint32(buf, fd) + internal.NativeEndian.PutUint32(buf, m.fd.Uint()) return buf, nil } -func patchValue(value []byte, typ btf.Type, replacements map[string]interface{}) error { - replaced := make(map[string]bool) - replace := func(name string, offset, size int, replacement interface{}) error { - if offset+size > len(value) { - return fmt.Errorf("%s: offset %d(+%d) is out of bounds", name, offset, size) - } - - buf, err := marshalBytes(replacement, size) - if err != nil { - return fmt.Errorf("marshal %s: %w", name, err) - } - - copy(value[offset:offset+size], buf) - replaced[name] = true - return nil - } - - switch parent := typ.(type) { - case *btf.Datasec: - for _, secinfo := range parent.Vars { - name := string(secinfo.Type.(*btf.Var).Name) - replacement, ok := replacements[name] - if !ok { - continue - } - - err := replace(name, int(secinfo.Offset), int(secinfo.Size), replacement) - if err != nil { - return err - } - } - - default: - return fmt.Errorf("patching %T is not supported", typ) - } - - if len(replaced) == len(replacements) { - return nil - } - - var missing []string - for name := range replacements { - if !replaced[name] { - missing = append(missing, name) - } - } - - if len(missing) == 1 { - return fmt.Errorf("unknown field: %s", missing[0]) - } - - return fmt.Errorf("unknown fields: %s", strings.Join(missing, 
",")) -} - // MapIterator iterates a Map. // // See Map.Iterate. @@ -1204,29 +1405,20 @@ func (mi *MapIterator) Err() error { // // Returns ErrNotExist, if there is no next eBPF map. func MapGetNextID(startID MapID) (MapID, error) { - id, err := objGetNextID(internal.BPF_MAP_GET_NEXT_ID, uint32(startID)) - return MapID(id), err + attr := &sys.MapGetNextIdAttr{Id: uint32(startID)} + return MapID(attr.NextId), sys.MapGetNextId(attr) } // NewMapFromID returns the map for a given id. // // Returns ErrNotExist, if there is no eBPF map with the given id. func NewMapFromID(id MapID) (*Map, error) { - fd, err := bpfObjGetFDByID(internal.BPF_MAP_GET_FD_BY_ID, uint32(id)) + fd, err := sys.MapGetFdById(&sys.MapGetFdByIdAttr{ + Id: uint32(id), + }) if err != nil { return nil, err } return newMapFromFD(fd) } - -// ID returns the systemwide unique ID of the map. -// -// Deprecated: use MapInfo.ID() instead. -func (m *Map) ID() (MapID, error) { - info, err := bpfGetMapInfoByFD(m.fd) - if err != nil { - return MapID(0), err - } - return MapID(info.id), nil -} diff --git a/agent/vendor/github.com/cilium/ebpf/marshalers.go b/agent/vendor/github.com/cilium/ebpf/marshalers.go index f2610eff9c7..544d17f35e1 100644 --- a/agent/vendor/github.com/cilium/ebpf/marshalers.go +++ b/agent/vendor/github.com/cilium/ebpf/marshalers.go @@ -8,9 +8,11 @@ import ( "fmt" "reflect" "runtime" + "sync" "unsafe" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" ) // marshalPtr converts an arbitrary value into a pointer suitable @@ -18,17 +20,17 @@ import ( // // As an optimization, it returns the original value if it is an // unsafe.Pointer. -func marshalPtr(data interface{}, length int) (internal.Pointer, error) { +func marshalPtr(data interface{}, length int) (sys.Pointer, error) { if ptr, ok := data.(unsafe.Pointer); ok { - return internal.NewPointer(ptr), nil + return sys.NewPointer(ptr), nil } buf, err := marshalBytes(data, length) if err != nil { - return internal.Pointer{}, err + return sys.Pointer{}, err } - return internal.NewSlicePointer(buf), nil + return sys.NewSlicePointer(buf), nil } // marshalBytes converts an arbitrary value into a byte buffer. @@ -39,6 +41,10 @@ func marshalPtr(data interface{}, length int) (internal.Pointer, error) { // Returns an error if the given value isn't representable in exactly // length bytes. func marshalBytes(data interface{}, length int) (buf []byte, err error) { + if data == nil { + return nil, errors.New("can't marshal a nil value") + } + switch value := data.(type) { case encoding.BinaryMarshaler: buf, err = value.MarshalBinary() @@ -68,31 +74,32 @@ func marshalBytes(data interface{}, length int) (buf []byte, err error) { return buf, nil } -func makeBuffer(dst interface{}, length int) (internal.Pointer, []byte) { +func makeBuffer(dst interface{}, length int) (sys.Pointer, []byte) { if ptr, ok := dst.(unsafe.Pointer); ok { - return internal.NewPointer(ptr), nil + return sys.NewPointer(ptr), nil } buf := make([]byte, length) - return internal.NewSlicePointer(buf), buf + return sys.NewSlicePointer(buf), buf +} + +var bytesReaderPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Reader) + }, } // unmarshalBytes converts a byte buffer into an arbitrary value. // // Prefer using Map.unmarshalKey and Map.unmarshalValue if possible, since // those have special cases that allow more types to be encoded. +// +// The common int32 and int64 types are directly handled to avoid +// unnecessary heap allocations as happening in the default case. 
func unmarshalBytes(data interface{}, buf []byte) error { switch value := data.(type) { case unsafe.Pointer: - // This could be solved in Go 1.17 by unsafe.Slice instead. (https://github.com/golang/go/issues/19367) - // We could opt for removing unsafe.Pointer support in the lib as well. - sh := &reflect.SliceHeader{ //nolint:govet - Data: uintptr(value), - Len: len(buf), - Cap: len(buf), - } - - dst := *(*[]byte)(unsafe.Pointer(sh)) + dst := unsafe.Slice((*byte)(value), len(buf)) copy(dst, buf) runtime.KeepAlive(value) return nil @@ -106,12 +113,38 @@ func unmarshalBytes(data interface{}, buf []byte) error { case *[]byte: *value = buf return nil + case *int32: + if len(buf) < 4 { + return errors.New("int32 requires 4 bytes") + } + *value = int32(internal.NativeEndian.Uint32(buf)) + return nil + case *uint32: + if len(buf) < 4 { + return errors.New("uint32 requires 4 bytes") + } + *value = internal.NativeEndian.Uint32(buf) + return nil + case *int64: + if len(buf) < 8 { + return errors.New("int64 requires 8 bytes") + } + *value = int64(internal.NativeEndian.Uint64(buf)) + return nil + case *uint64: + if len(buf) < 8 { + return errors.New("uint64 requires 8 bytes") + } + *value = internal.NativeEndian.Uint64(buf) + return nil case string: return errors.New("require pointer to string") case []byte: return errors.New("require pointer to []byte") default: - rd := bytes.NewReader(buf) + rd := bytesReaderPool.Get().(*bytes.Reader) + rd.Reset(buf) + defer bytesReaderPool.Put(rd) if err := binary.Read(rd, internal.NativeEndian, value); err != nil { return fmt.Errorf("decoding %T: %v", value, err) } @@ -125,38 +158,38 @@ func unmarshalBytes(data interface{}, buf []byte) error { // Values are initialized to zero if the slice has less elements than CPUs. // // slice must have a type like []elementType. 
-func marshalPerCPUValue(slice interface{}, elemLength int) (internal.Pointer, error) { +func marshalPerCPUValue(slice interface{}, elemLength int) (sys.Pointer, error) { sliceType := reflect.TypeOf(slice) if sliceType.Kind() != reflect.Slice { - return internal.Pointer{}, errors.New("per-CPU value requires slice") + return sys.Pointer{}, errors.New("per-CPU value requires slice") } possibleCPUs, err := internal.PossibleCPUs() if err != nil { - return internal.Pointer{}, err + return sys.Pointer{}, err } sliceValue := reflect.ValueOf(slice) sliceLen := sliceValue.Len() if sliceLen > possibleCPUs { - return internal.Pointer{}, fmt.Errorf("per-CPU value exceeds number of CPUs") + return sys.Pointer{}, fmt.Errorf("per-CPU value exceeds number of CPUs") } - alignedElemLength := align(elemLength, 8) + alignedElemLength := internal.Align(elemLength, 8) buf := make([]byte, alignedElemLength*possibleCPUs) for i := 0; i < sliceLen; i++ { elem := sliceValue.Index(i).Interface() elemBytes, err := marshalBytes(elem, elemLength) if err != nil { - return internal.Pointer{}, err + return sys.Pointer{}, err } offset := i * alignedElemLength copy(buf[offset:offset+elemLength], elemBytes) } - return internal.NewSlicePointer(buf), nil + return sys.NewSlicePointer(buf), nil } // unmarshalPerCPUValue decodes a buffer into a slice containing one value per @@ -212,7 +245,3 @@ func unmarshalPerCPUValue(slicePtr interface{}, elemLength int, buf []byte) erro reflect.ValueOf(slicePtr).Elem().Set(slice) return nil } - -func align(n, alignment int) int { - return (int(n) + alignment - 1) / alignment * alignment -} diff --git a/agent/vendor/github.com/cilium/ebpf/prog.go b/agent/vendor/github.com/cilium/ebpf/prog.go index 13bdb6ddad1..675edc711d7 100644 --- a/agent/vendor/github.com/cilium/ebpf/prog.go +++ b/agent/vendor/github.com/cilium/ebpf/prog.go @@ -5,23 +5,22 @@ import ( "encoding/binary" "errors" "fmt" - "io" "math" "path/filepath" + "runtime" "strings" "time" "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/btf" + "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) // ErrNotSupported is returned whenever the kernel doesn't support a feature. var ErrNotSupported = internal.ErrNotSupported -var errUnsatisfiedReference = errors.New("unsatisfied reference") - // ProgramID represents the unique ID of an eBPF program. type ProgramID uint32 @@ -44,12 +43,13 @@ type ProgramOptions struct { // Controls the output buffer size for the verifier. Defaults to // DefaultVerifierLogSize. LogSize int - // An ELF containing the target BTF for this program. It is used both to - // find the correct function to trace and to apply CO-RE relocations. + // Type information used for CO-RE relocations and when attaching to + // kernel functions. + // // This is useful in environments where the kernel BTF is not available // (containers) or where it is in a non-standard location. Defaults to - // use the kernel BTF from a well-known location. - TargetBTF io.ReaderAt + // use the kernel BTF from a well-known location if nil. + KernelTypes *btf.Spec } // ProgramSpec defines a Program. @@ -57,16 +57,32 @@ type ProgramSpec struct { // Name is passed to the kernel as a debug aid. Must only contain // alpha numeric and '_' characters. Name string + // Type determines at which hook in the kernel a program will run. 
- Type ProgramType + Type ProgramType + + // AttachType of the program, needed to differentiate allowed context + // accesses in some newer program types like CGroupSockAddr. + // + // Available on kernels 4.17 and later. AttachType AttachType - // Name of a kernel data structure to attach to. It's interpretation - // depends on Type and AttachType. - AttachTo string + + // Name of a kernel data structure or function to attach to. Its + // interpretation depends on Type and AttachType. + AttachTo string + + // The program to attach to. Must be provided manually. + AttachTarget *Program + + // The name of the ELF section this program orininated from. + SectionName string + Instructions asm.Instructions + // Flags is passed to the kernel and specifies additional program // load attributes. Flags uint32 + // License of the program. Some helpers are only available if // the license is deemed compatible with the GPL. // @@ -82,7 +98,7 @@ type ProgramSpec struct { // The BTF associated with this program. Changing Instructions // will most likely invalidate the contained data, and may // result in errors when attempting to load it into the kernel. - BTF *btf.Program + BTF *btf.Spec // The byte order this program was compiled for, may be nil. ByteOrder binary.ByteOrder @@ -107,6 +123,8 @@ func (ps *ProgramSpec) Tag() (string, error) { return ps.Instructions.Tag(internal.NativeEndian) } +type VerifierError = internal.VerifierError + // Program represents BPF program loaded into the kernel. // // It is not safe to close a Program which is used by other goroutines. @@ -115,7 +133,7 @@ type Program struct { // otherwise it is empty. VerifierLog string - fd *internal.FD + fd *sys.FD name string pinnedPath string typ ProgramType @@ -123,8 +141,7 @@ type Program struct { // NewProgram creates a new Program. // -// Loading a program for the first time will perform -// feature detection by loading small, temporary programs. +// See NewProgramWithOptions for details. func NewProgram(spec *ProgramSpec) (*Program, error) { return NewProgramWithOptions(spec, ProgramOptions{}) } @@ -133,12 +150,19 @@ func NewProgram(spec *ProgramSpec) (*Program, error) { // // Loading a program for the first time will perform // feature detection by loading small, temporary programs. +// +// Returns an error wrapping VerifierError if the program or its BTF is rejected +// by the kernel. 
func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) { + if spec == nil { + return nil, errors.New("can't load a program from a nil spec") + } + handles := newHandleCache() defer handles.close() prog, err := newProgramWithOptions(spec, opts, handles) - if errors.Is(err, errUnsatisfiedReference) { + if errors.Is(err, asm.ErrUnsatisfiedMapReference) { return nil, fmt.Errorf("cannot load program without loading its whole collection: %w", err) } return prog, err @@ -146,7 +170,11 @@ func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *handleCache) (*Program, error) { if len(spec.Instructions) == 0 { - return nil, errors.New("Instructions cannot be empty") + return nil, errors.New("instructions cannot be empty") + } + + if spec.Type == UnspecifiedProgram { + return nil, errors.New("can't load program of unspecified type") } if spec.ByteOrder != nil && spec.ByteOrder != internal.NativeEndian { @@ -166,89 +194,85 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand kv = v.Kernel() } - attr := &bpfProgLoadAttr{ - progType: spec.Type, - progFlags: spec.Flags, - expectedAttachType: spec.AttachType, - license: internal.NewStringPointer(spec.License), - kernelVersion: kv, + attr := &sys.ProgLoadAttr{ + ProgType: sys.ProgType(spec.Type), + ProgFlags: spec.Flags, + ExpectedAttachType: sys.AttachType(spec.AttachType), + License: sys.NewStringPointer(spec.License), + KernVersion: kv, } if haveObjName() == nil { - attr.progName = internal.NewBPFObjName(spec.Name) + attr.ProgName = sys.NewObjName(spec.Name) } - var err error - var targetBTF *btf.Spec - if opts.TargetBTF != nil { - targetBTF, err = handles.btfSpec(opts.TargetBTF) - if err != nil { - return nil, fmt.Errorf("load target BTF: %w", err) - } - } + kernelTypes := opts.KernelTypes + + insns := make(asm.Instructions, len(spec.Instructions)) + copy(insns, spec.Instructions) var btfDisabled bool - var core btf.COREFixups if spec.BTF != nil { - core, err = btf.ProgramFixups(spec.BTF, targetBTF) - if err != nil { - return nil, fmt.Errorf("CO-RE relocations: %w", err) + if err := applyRelocations(insns, spec.BTF, kernelTypes); err != nil { + return nil, fmt.Errorf("apply CO-RE relocations: %w", err) } - handle, err := handles.btfHandle(btf.ProgramSpec(spec.BTF)) + handle, err := handles.btfHandle(spec.BTF) btfDisabled = errors.Is(err, btf.ErrNotSupported) if err != nil && !btfDisabled { return nil, fmt.Errorf("load BTF: %w", err) } if handle != nil { - attr.progBTFFd = uint32(handle.FD()) + attr.ProgBtfFd = uint32(handle.FD()) - recSize, bytes, err := btf.ProgramLineInfos(spec.BTF) + fib, lib, err := btf.MarshalExtInfos(insns, spec.BTF.TypeID) if err != nil { - return nil, fmt.Errorf("get BTF line infos: %w", err) + return nil, err } - attr.lineInfoRecSize = recSize - attr.lineInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize)) - attr.lineInfo = internal.NewSlicePointer(bytes) - recSize, bytes, err = btf.ProgramFuncInfos(spec.BTF) - if err != nil { - return nil, fmt.Errorf("get BTF function infos: %w", err) - } - attr.funcInfoRecSize = recSize - attr.funcInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize)) - attr.funcInfo = internal.NewSlicePointer(bytes) - } - } + attr.FuncInfoRecSize = btf.FuncInfoSize + attr.FuncInfoCnt = uint32(len(fib)) / btf.FuncInfoSize + attr.FuncInfo = sys.NewSlicePointer(fib) - insns, err := core.Apply(spec.Instructions) - if err != nil { - return nil, 
fmt.Errorf("CO-RE fixup: %w", err) + attr.LineInfoRecSize = btf.LineInfoSize + attr.LineInfoCnt = uint32(len(lib)) / btf.LineInfoSize + attr.LineInfo = sys.NewSlicePointer(lib) + } } - if err := fixupJumpsAndCalls(insns); err != nil { + if err := fixupAndValidate(insns); err != nil { return nil, err } - buf := bytes.NewBuffer(make([]byte, 0, len(spec.Instructions)*asm.InstructionSize)) - err = insns.Marshal(buf, internal.NativeEndian) + buf := bytes.NewBuffer(make([]byte, 0, insns.Size())) + err := insns.Marshal(buf, internal.NativeEndian) if err != nil { return nil, err } bytecode := buf.Bytes() - attr.instructions = internal.NewSlicePointer(bytecode) - attr.insCount = uint32(len(bytecode) / asm.InstructionSize) + attr.Insns = sys.NewSlicePointer(bytecode) + attr.InsnCnt = uint32(len(bytecode) / asm.InstructionSize) - if spec.AttachTo != "" { - target, err := resolveBTFType(targetBTF, spec.AttachTo, spec.Type, spec.AttachType) + if spec.AttachTarget != nil { + targetID, err := findTargetInProgram(spec.AttachTarget, spec.AttachTo, spec.Type, spec.AttachType) if err != nil { - return nil, err + return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err) } - if target != nil { - attr.attachBTFID = target.ID() + + attr.AttachBtfId = uint32(targetID) + attr.AttachProgFd = uint32(spec.AttachTarget.FD()) + defer runtime.KeepAlive(spec.AttachTarget) + } else if spec.AttachTo != "" { + targetID, err := findTargetInKernel(kernelTypes, spec.AttachTo, spec.Type, spec.AttachType) + if err != nil && !errors.Is(err, errUnrecognizedAttachType) { + // We ignore errUnrecognizedAttachType since AttachTo may be non-empty + // for programs that don't attach anywhere. + return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err) } + + attr.AttachBtfId = uint32(targetID) } logSize := DefaultVerifierLogSize @@ -259,36 +283,46 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand var logBuf []byte if opts.LogLevel > 0 { logBuf = make([]byte, logSize) - attr.logLevel = opts.LogLevel - attr.logSize = uint32(len(logBuf)) - attr.logBuf = internal.NewSlicePointer(logBuf) + attr.LogLevel = opts.LogLevel + attr.LogSize = uint32(len(logBuf)) + attr.LogBuf = sys.NewSlicePointer(logBuf) } - fd, err := bpfProgLoad(attr) + fd, err := sys.ProgLoad(attr) if err == nil { - return &Program{internal.CString(logBuf), fd, spec.Name, "", spec.Type}, nil + return &Program{unix.ByteSliceToString(logBuf), fd, spec.Name, "", spec.Type}, nil } - logErr := err if opts.LogLevel == 0 && opts.LogSize >= 0 { // Re-run with the verifier enabled to get better error messages. logBuf = make([]byte, logSize) - attr.logLevel = 1 - attr.logSize = uint32(len(logBuf)) - attr.logBuf = internal.NewSlicePointer(logBuf) + attr.LogLevel = 1 + attr.LogSize = uint32(len(logBuf)) + attr.LogBuf = sys.NewSlicePointer(logBuf) + _, _ = sys.ProgLoad(attr) + } + + switch { + case errors.Is(err, unix.EPERM): + if len(logBuf) > 0 && logBuf[0] == 0 { + // EPERM due to RLIMIT_MEMLOCK happens before the verifier, so we can + // check that the log is empty to reduce false positives. + return nil, fmt.Errorf("load program: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err) + } - _, logErr = bpfProgLoad(attr) - } + fallthrough - if errors.Is(logErr, unix.EPERM) && logBuf[0] == 0 { - // EPERM due to RLIMIT_MEMLOCK happens before the verifier, so we can - // check that the log is empty to reduce false positives. 
- return nil, fmt.Errorf("load program: RLIMIT_MEMLOCK may be too low: %w", logErr) + case errors.Is(err, unix.EINVAL): + if hasFunctionReferences(spec.Instructions) { + if err := haveBPFToBPFCalls(); err != nil { + return nil, fmt.Errorf("load program: %w", err) + } + } } - err = internal.ErrorWithLog(err, logBuf, logErr) + err = internal.ErrorWithLog(err, logBuf) if btfDisabled { - return nil, fmt.Errorf("load program without BTF: %w", err) + return nil, fmt.Errorf("load program: %w (BTF disabled)", err) } return nil, fmt.Errorf("load program: %w", err) } @@ -299,18 +333,21 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand // // Requires at least Linux 4.10. func NewProgramFromFD(fd int) (*Program, error) { - if fd < 0 { - return nil, errors.New("invalid fd") + f, err := sys.NewFD(fd) + if err != nil { + return nil, err } - return newProgramFromFD(internal.NewFD(uint32(fd))) + return newProgramFromFD(f) } // NewProgramFromID returns the program for a given id. // // Returns ErrNotExist, if there is no eBPF program with the given id. func NewProgramFromID(id ProgramID) (*Program, error) { - fd, err := bpfObjGetFDByID(internal.BPF_PROG_GET_FD_BY_ID, uint32(id)) + fd, err := sys.ProgGetFdById(&sys.ProgGetFdByIdAttr{ + Id: uint32(id), + }) if err != nil { return nil, fmt.Errorf("get program by id: %w", err) } @@ -318,7 +355,7 @@ func NewProgramFromID(id ProgramID) (*Program, error) { return newProgramFromFD(fd) } -func newProgramFromFD(fd *internal.FD) (*Program, error) { +func newProgramFromFD(fd *sys.FD) (*Program, error) { info, err := newProgramInfoFromFd(fd) if err != nil { fd.Close() @@ -347,18 +384,29 @@ func (p *Program) Info() (*ProgramInfo, error) { return newProgramInfoFromFd(p.fd) } -// FD gets the file descriptor of the Program. +// Handle returns a reference to the program's type information in the kernel. // -// It is invalid to call this function after Close has been called. -func (p *Program) FD() int { - fd, err := p.fd.Value() +// Returns ErrNotSupported if the kernel has no BTF support, or if there is no +// BTF associated with the program. +func (p *Program) Handle() (*btf.Handle, error) { + info, err := p.Info() if err != nil { - // Best effort: -1 is the number most likely to be an - // invalid file descriptor. - return -1 + return nil, err + } + + id, ok := info.BTFID() + if !ok { + return nil, fmt.Errorf("program %s: retrieve BTF ID: %w", p, ErrNotSupported) } - return int(fd) + return btf.NewHandleFromID(id) +} + +// FD gets the file descriptor of the Program. +// +// It is invalid to call this function after Close has been called. +func (p *Program) FD() int { + return p.fd.Int() } // Clone creates a duplicate of the Program. @@ -412,7 +460,9 @@ func (p *Program) IsPinned() bool { return p.pinnedPath != "" } -// Close unloads the program from the kernel. +// Close the Program's underlying file descriptor, which could unload +// the program from the kernel if it is not pinned or attached to a +// kernel hook. func (p *Program) Close() error { if p == nil { return nil @@ -421,6 +471,28 @@ func (p *Program) Close() error { return p.fd.Close() } +// Various options for Run'ing a Program +type RunOptions struct { + // Program's data input. Required field. + Data []byte + // Program's data after Program has run. Caller must allocate. Optional field. + DataOut []byte + // Program's context input. Optional field. + Context interface{} + // Program's context after Program has run. Must be a pointer or slice. Optional field. 
+ ContextOut interface{} + // Number of times to run Program. Optional field. Defaults to 1. + Repeat uint32 + // Optional flags. + Flags uint32 + // CPU to run Program on. Optional field. + // Note not all program types support this field. + CPU uint32 + // Called whenever the syscall is interrupted, and should be set to testing.B.ResetTimer + // or similar. Typically used during benchmarking. Optional field. + Reset func() +} + // Test runs the Program in the kernel with the given input and returns the // value returned by the eBPF program. outLen may be zero. // @@ -429,11 +501,38 @@ func (p *Program) Close() error { // // This function requires at least Linux 4.12. func (p *Program) Test(in []byte) (uint32, []byte, error) { - ret, out, _, err := p.testRun(in, 1, nil) + // Older kernels ignore the dataSizeOut argument when copying to user space. + // Combined with things like bpf_xdp_adjust_head() we don't really know what the final + // size will be. Hence we allocate an output buffer which we hope will always be large + // enough, and panic if the kernel wrote past the end of the allocation. + // See https://patchwork.ozlabs.org/cover/1006822/ + var out []byte + if len(in) > 0 { + out = make([]byte, len(in)+outputPad) + } + + opts := RunOptions{ + Data: in, + DataOut: out, + Repeat: 1, + } + + ret, _, err := p.testRun(&opts) if err != nil { return ret, nil, fmt.Errorf("can't test program: %w", err) } - return ret, out, nil + return ret, opts.DataOut, nil +} + +// Run runs the Program in kernel with given RunOptions. +// +// Note: the same restrictions from Test apply. +func (p *Program) Run(opts *RunOptions) (uint32, error) { + ret, _, err := p.testRun(opts) + if err != nil { + return ret, fmt.Errorf("can't test program: %w", err) + } + return ret, nil } // Benchmark runs the Program with the given input for a number of times @@ -448,7 +547,17 @@ func (p *Program) Test(in []byte) (uint32, []byte, error) { // // This function requires at least Linux 4.12. func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.Duration, error) { - ret, _, total, err := p.testRun(in, repeat, reset) + if uint(repeat) > math.MaxUint32 { + return 0, 0, fmt.Errorf("repeat is too high") + } + + opts := RunOptions{ + Data: in, + Repeat: uint32(repeat), + Reset: reset, + } + + ret, total, err := p.testRun(&opts) if err != nil { return ret, total, fmt.Errorf("can't benchmark program: %w", err) } @@ -457,6 +566,7 @@ func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.D var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() error { prog, err := NewProgram(&ProgramSpec{ + // SocketFilter does not require privileges on newer kernels. Type: SocketFilter, Instructions: asm.Instructions{ asm.LoadImm(asm.R0, 0, asm.DWord), @@ -472,88 +582,109 @@ var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() e // Programs require at least 14 bytes input in := make([]byte, 14) - attr := bpfProgTestRunAttr{ - fd: uint32(prog.FD()), - dataSizeIn: uint32(len(in)), - dataIn: internal.NewSlicePointer(in), + attr := sys.ProgRunAttr{ + ProgFd: uint32(prog.FD()), + DataSizeIn: uint32(len(in)), + DataIn: sys.NewSlicePointer(in), } - err = bpfProgTestRun(&attr) - if errors.Is(err, unix.EINVAL) { + err = sys.ProgRun(&attr) + switch { + case errors.Is(err, unix.EINVAL): // Check for EINVAL specifically, rather than err != nil since we // otherwise misdetect due to insufficient permissions. 
return internal.ErrNotSupported - } - if errors.Is(err, unix.EINTR) { + + case errors.Is(err, unix.EINTR): // We know that PROG_TEST_RUN is supported if we get EINTR. return nil - } - return err -}) -func (p *Program) testRun(in []byte, repeat int, reset func()) (uint32, []byte, time.Duration, error) { - if uint(repeat) > math.MaxUint32 { - return 0, nil, 0, fmt.Errorf("repeat is too high") + case errors.Is(err, unix.ENOTSUPP): + // The first PROG_TEST_RUN patches shipped in 4.12 didn't include + // a test runner for SocketFilter. ENOTSUPP means PROG_TEST_RUN is + // supported, but not for the program type used in the probe. + return nil } - if len(in) == 0 { - return 0, nil, 0, fmt.Errorf("missing input") - } + return err +}) - if uint(len(in)) > math.MaxUint32 { - return 0, nil, 0, fmt.Errorf("input is too long") +func (p *Program) testRun(opts *RunOptions) (uint32, time.Duration, error) { + if uint(len(opts.Data)) > math.MaxUint32 { + return 0, 0, fmt.Errorf("input is too long") } if err := haveProgTestRun(); err != nil { - return 0, nil, 0, err + return 0, 0, err } - // Older kernels ignore the dataSizeOut argument when copying to user space. - // Combined with things like bpf_xdp_adjust_head() we don't really know what the final - // size will be. Hence we allocate an output buffer which we hope will always be large - // enough, and panic if the kernel wrote past the end of the allocation. - // See https://patchwork.ozlabs.org/cover/1006822/ - out := make([]byte, len(in)+outputPad) + var ctxBytes []byte + if opts.Context != nil { + ctx := new(bytes.Buffer) + if err := binary.Write(ctx, internal.NativeEndian, opts.Context); err != nil { + return 0, 0, fmt.Errorf("cannot serialize context: %v", err) + } + ctxBytes = ctx.Bytes() + } - fd, err := p.fd.Value() - if err != nil { - return 0, nil, 0, err + var ctxOut []byte + if opts.ContextOut != nil { + ctxOut = make([]byte, binary.Size(opts.ContextOut)) } - attr := bpfProgTestRunAttr{ - fd: fd, - dataSizeIn: uint32(len(in)), - dataSizeOut: uint32(len(out)), - dataIn: internal.NewSlicePointer(in), - dataOut: internal.NewSlicePointer(out), - repeat: uint32(repeat), + attr := sys.ProgRunAttr{ + ProgFd: p.fd.Uint(), + DataSizeIn: uint32(len(opts.Data)), + DataSizeOut: uint32(len(opts.DataOut)), + DataIn: sys.NewSlicePointer(opts.Data), + DataOut: sys.NewSlicePointer(opts.DataOut), + Repeat: uint32(opts.Repeat), + CtxSizeIn: uint32(len(ctxBytes)), + CtxSizeOut: uint32(len(ctxOut)), + CtxIn: sys.NewSlicePointer(ctxBytes), + CtxOut: sys.NewSlicePointer(ctxOut), + Flags: opts.Flags, + Cpu: opts.CPU, } for { - err = bpfProgTestRun(&attr) + err := sys.ProgRun(&attr) if err == nil { break } if errors.Is(err, unix.EINTR) { - if reset != nil { - reset() + if opts.Reset != nil { + opts.Reset() } continue } - return 0, nil, 0, fmt.Errorf("can't run test: %w", err) + if errors.Is(err, unix.ENOTSUPP) { + return 0, 0, fmt.Errorf("kernel doesn't support testing program type %s: %w", p.Type(), ErrNotSupported) + } + + return 0, 0, fmt.Errorf("can't run test: %w", err) } - if int(attr.dataSizeOut) > cap(out) { - // Houston, we have a problem. The program created more data than we allocated, - // and the kernel wrote past the end of our buffer. - panic("kernel wrote past end of output buffer") + if opts.DataOut != nil { + if int(attr.DataSizeOut) > cap(opts.DataOut) { + // Houston, we have a problem. The program created more data than we allocated, + // and the kernel wrote past the end of our buffer. 
+ panic("kernel wrote past end of output buffer") + } + opts.DataOut = opts.DataOut[:int(attr.DataSizeOut)] } - out = out[:int(attr.dataSizeOut)] - total := time.Duration(attr.duration) * time.Nanosecond - return attr.retval, out, total, nil + if len(ctxOut) != 0 { + b := bytes.NewReader(ctxOut) + if err := binary.Read(b, internal.NativeEndian, opts.ContextOut); err != nil { + return 0, 0, fmt.Errorf("failed to decode ContextOut: %v", err) + } + } + + total := time.Duration(attr.Duration) * time.Nanosecond + return attr.Retval, total, nil } func unmarshalProgram(buf []byte) (*Program, error) { @@ -572,70 +703,19 @@ func marshalProgram(p *Program, length int) ([]byte, error) { return nil, fmt.Errorf("can't marshal program to %d bytes", length) } - value, err := p.fd.Value() - if err != nil { - return nil, err - } - buf := make([]byte, 4) - internal.NativeEndian.PutUint32(buf, value) + internal.NativeEndian.PutUint32(buf, p.fd.Uint()) return buf, nil } -// Attach a Program. -// -// Deprecated: use link.RawAttachProgram instead. -func (p *Program) Attach(fd int, typ AttachType, flags AttachFlags) error { - if fd < 0 { - return errors.New("invalid fd") - } - - pfd, err := p.fd.Value() - if err != nil { - return err - } - - attr := internal.BPFProgAttachAttr{ - TargetFd: uint32(fd), - AttachBpfFd: pfd, - AttachType: uint32(typ), - AttachFlags: uint32(flags), - } - - return internal.BPFProgAttach(&attr) -} - -// Detach a Program. -// -// Deprecated: use link.RawDetachProgram instead. -func (p *Program) Detach(fd int, typ AttachType, flags AttachFlags) error { - if fd < 0 { - return errors.New("invalid fd") - } - - if flags != 0 { - return errors.New("flags must be zero") - } - - pfd, err := p.fd.Value() - if err != nil { - return err - } - - attr := internal.BPFProgDetachAttr{ - TargetFd: uint32(fd), - AttachBpfFd: pfd, - AttachType: uint32(typ), - } - - return internal.BPFProgDetach(&attr) -} - // LoadPinnedProgram loads a Program from a BPF file. // // Requires at least Linux 4.11. func LoadPinnedProgram(fileName string, opts *LoadPinOptions) (*Program, error) { - fd, err := internal.BPFObjGet(fileName, opts.Marshal()) + fd, err := sys.ObjGet(&sys.ObjGetAttr{ + Pathname: sys.NewStringPointer(fileName), + FileFlags: opts.Marshal(), + }) if err != nil { return nil, err } @@ -669,60 +749,127 @@ func SanitizeName(name string, replacement rune) string { // // Returns ErrNotExist, if there is no next eBPF program. func ProgramGetNextID(startID ProgramID) (ProgramID, error) { - id, err := objGetNextID(internal.BPF_PROG_GET_NEXT_ID, uint32(startID)) - return ProgramID(id), err + attr := &sys.ProgGetNextIdAttr{Id: uint32(startID)} + return ProgramID(attr.NextId), sys.ProgGetNextId(attr) } -// ID returns the systemwide unique ID of the program. +// BindMap binds map to the program and is only released once program is released. // -// Deprecated: use ProgramInfo.ID() instead. -func (p *Program) ID() (ProgramID, error) { - info, err := bpfGetProgInfoByFD(p.fd) - if err != nil { - return ProgramID(0), err +// This may be used in cases where metadata should be associated with the program +// which otherwise does not contain any references to the map. 
+func (p *Program) BindMap(m *Map) error { + attr := &sys.ProgBindMapAttr{ + ProgFd: uint32(p.FD()), + MapFd: uint32(m.FD()), } - return ProgramID(info.id), nil + + return sys.ProgBindMap(attr) } -func resolveBTFType(kernel *btf.Spec, name string, progType ProgramType, attachType AttachType) (btf.Type, error) { +var errUnrecognizedAttachType = errors.New("unrecognized attach type") + +// find an attach target type in the kernel. +// +// spec may be nil and defaults to the canonical kernel BTF. name together with +// progType and attachType determine which type we need to attach to. +// +// Returns errUnrecognizedAttachType. +func findTargetInKernel(spec *btf.Spec, name string, progType ProgramType, attachType AttachType) (btf.TypeID, error) { type match struct { p ProgramType a AttachType } - var target btf.Type - var typeName, featureName string + var ( + typeName, featureName string + isBTFTypeFunc = true + ) + switch (match{progType, attachType}) { case match{LSM, AttachLSMMac}: - target = new(btf.Func) typeName = "bpf_lsm_" + name featureName = name + " LSM hook" - case match{Tracing, AttachTraceIter}: - target = new(btf.Func) typeName = "bpf_iter_" + name featureName = name + " iterator" - + case match{Tracing, AttachTraceFEntry}: + typeName = name + featureName = fmt.Sprintf("fentry %s", name) + case match{Tracing, AttachTraceFExit}: + typeName = name + featureName = fmt.Sprintf("fexit %s", name) + case match{Tracing, AttachModifyReturn}: + typeName = name + featureName = fmt.Sprintf("fmod_ret %s", name) + case match{Tracing, AttachTraceRawTp}: + typeName = fmt.Sprintf("btf_trace_%s", name) + featureName = fmt.Sprintf("raw_tp %s", name) + isBTFTypeFunc = false default: - return nil, nil + return 0, errUnrecognizedAttachType } - if kernel == nil { - var err error - kernel, err = btf.LoadKernelSpec() - if err != nil { - return nil, fmt.Errorf("load kernel spec: %w", err) - } + spec, err := maybeLoadKernelBTF(spec) + if err != nil { + return 0, fmt.Errorf("load kernel spec: %w", err) } - err := kernel.FindType(typeName, target) - if errors.Is(err, btf.ErrNotFound) { - return nil, &internal.UnsupportedFeatureError{ - Name: featureName, + var target btf.Type + if isBTFTypeFunc { + var targetFunc *btf.Func + err = spec.TypeByName(typeName, &targetFunc) + target = targetFunc + } else { + var targetTypedef *btf.Typedef + err = spec.TypeByName(typeName, &targetTypedef) + target = targetTypedef + } + + if err != nil { + if errors.Is(err, btf.ErrNotFound) { + return 0, &internal.UnsupportedFeatureError{ + Name: featureName, + } } + return 0, fmt.Errorf("find target for %s: %w", featureName, err) } + + return spec.TypeID(target) +} + +// find an attach target type in a program. +// +// Returns errUnrecognizedAttachType. 
+func findTargetInProgram(prog *Program, name string, progType ProgramType, attachType AttachType) (btf.TypeID, error) { + type match struct { + p ProgramType + a AttachType + } + + var typeName string + switch (match{progType, attachType}) { + case match{Extension, AttachNone}: + typeName = name + default: + return 0, errUnrecognizedAttachType + } + + btfHandle, err := prog.Handle() + if err != nil { + return 0, fmt.Errorf("load target BTF: %w", err) + } + defer btfHandle.Close() + + spec, err := btfHandle.Spec(nil) if err != nil { - return nil, fmt.Errorf("resolve BTF for %s: %w", featureName, err) + return 0, err } - return target, nil + + var targetFunc *btf.Func + err = spec.TypeByName(typeName, &targetFunc) + if err != nil { + return 0, fmt.Errorf("find target %s: %w", typeName, err) + } + + return spec.TypeID(targetFunc) } diff --git a/agent/vendor/github.com/cilium/ebpf/run-tests.sh b/agent/vendor/github.com/cilium/ebpf/run-tests.sh index e2437beed2a..c21cca9e5e7 100644 --- a/agent/vendor/github.com/cilium/ebpf/run-tests.sh +++ b/agent/vendor/github.com/cilium/ebpf/run-tests.sh @@ -1,11 +1,11 @@ -#!/bin/bash +#!/usr/bin/env bash # Test the current package under a different kernel. # Requires virtme and qemu to be installed. # Examples: # Run all tests on a 5.4 kernel # $ ./run-tests.sh 5.4 # Run a subset of tests: -# $ ./run-tests.sh 5.4 go test ./link +# $ ./run-tests.sh 5.4 ./link set -euo pipefail @@ -48,19 +48,31 @@ if [[ "${1:-}" = "--exec-vm" ]]; then rm "${output}/fake-stdin" fi - $sudo virtme-run --kimg "${input}/bzImage" --memory 768M --pwd \ - --rwdir="${testdir}=${testdir}" \ - --rodir=/run/input="${input}" \ - --rwdir=/run/output="${output}" \ - --script-sh "PATH=\"$PATH\" \"$script\" --exec-test $cmd" \ - --qemu-opts -smp 2 # need at least two CPUs for some tests - - if [[ ! -e "${output}/success" ]]; then - exit 1 - fi - + for ((i = 0; i < 3; i++)); do + if ! $sudo virtme-run --kimg "${input}/bzImage" --memory 768M --pwd \ + --rwdir="${testdir}=${testdir}" \ + --rodir=/run/input="${input}" \ + --rwdir=/run/output="${output}" \ + --script-sh "PATH=\"$PATH\" CI_MAX_KERNEL_VERSION="${CI_MAX_KERNEL_VERSION:-}" \"$script\" --exec-test $cmd" \ + --kopt possible_cpus=2; then # need at least two CPUs for some tests + exit 23 + fi + + if [[ -e "${output}/status" ]]; then + break + fi + + if [[ -v CI ]]; then + echo "Retrying test run due to qemu crash" + continue + fi + + exit 42 + done + + rc=$(<"${output}/status") $sudo rm -r "$output" - exit 0 + exit $rc elif [[ "${1:-}" = "--exec-test" ]]; then shift @@ -71,13 +83,16 @@ elif [[ "${1:-}" = "--exec-test" ]]; then export KERNEL_SELFTESTS="/run/input/bpf" fi - dmesg -C - if ! "$@"; then - dmesg - exit 1 + if [[ -f "/run/input/bpf/bpf_testmod/bpf_testmod.ko" ]]; then + insmod "/run/input/bpf/bpf_testmod/bpf_testmod.ko" fi - touch "/run/output/success" - exit 0 + + dmesg --clear + rc=0 + "$@" || rc=$? 
+ dmesg + echo $rc > "/run/output/status" + exit $rc # this return code is "swallowed" by qemu fi readonly kernel_version="${1:-}" @@ -88,33 +103,40 @@ fi shift readonly kernel="linux-${kernel_version}.bz" -readonly selftests="linux-${kernel_version}-selftests-bpf.bz" +readonly selftests="linux-${kernel_version}-selftests-bpf.tgz" readonly input="$(mktemp -d)" readonly tmp_dir="${TMPDIR:-/tmp}" readonly branch="${BRANCH:-master}" fetch() { echo Fetching "${1}" - wget -nv -N -P "${tmp_dir}" "https://github.com/cilium/ci-kernels/raw/${branch}/${1}" + pushd "${tmp_dir}" > /dev/null + curl -s -L -O --fail --etag-compare "${1}.etag" --etag-save "${1}.etag" "https://github.com/cilium/ci-kernels/raw/${branch}/${1}" + local ret=$? + popd > /dev/null + return $ret } fetch "${kernel}" cp "${tmp_dir}/${kernel}" "${input}/bzImage" if fetch "${selftests}"; then + echo "Decompressing selftests" mkdir "${input}/bpf" - tar --strip-components=4 -xjf "${tmp_dir}/${selftests}" -C "${input}/bpf" + tar --strip-components=4 -xf "${tmp_dir}/${selftests}" -C "${input}/bpf" else echo "No selftests found, disabling" fi -args=(-v -short -coverpkg=./... -coverprofile=coverage.out -count 1 ./...) +args=(-short -coverpkg=./... -coverprofile=coverage.out -count 1 ./...) if (( $# > 0 )); then args=("$@") fi export GOFLAGS=-mod=readonly export CGO_ENABLED=0 +# LINUX_VERSION_CODE test compares this to discovered value. +export KERNEL_VERSION="${kernel_version}" echo Testing on "${kernel_version}" go test -exec "$script --exec-vm $input" "${args[@]}" diff --git a/agent/vendor/github.com/cilium/ebpf/syscalls.go b/agent/vendor/github.com/cilium/ebpf/syscalls.go index f5a38549bb7..e5c270a5585 100644 --- a/agent/vendor/github.com/cilium/ebpf/syscalls.go +++ b/agent/vendor/github.com/cilium/ebpf/syscalls.go @@ -1,21 +1,16 @@ package ebpf import ( + "bytes" "errors" "fmt" - "os" - "unsafe" + "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/btf" + "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) -// ErrNotExist is returned when loading a non-existing map or program. -// -// Deprecated: use os.ErrNotExist instead. -var ErrNotExist = os.ErrNotExist - // invalidBPFObjNameChar returns true if char may not appear in // a BPF object name. 
func invalidBPFObjNameChar(char rune) bool { @@ -37,154 +32,24 @@ func invalidBPFObjNameChar(char rune) bool { } } -type bpfMapOpAttr struct { - mapFd uint32 - padding uint32 - key internal.Pointer - value internal.Pointer - flags uint64 -} - -type bpfBatchMapOpAttr struct { - inBatch internal.Pointer - outBatch internal.Pointer - keys internal.Pointer - values internal.Pointer - count uint32 - mapFd uint32 - elemFlags uint64 - flags uint64 -} - -type bpfMapInfo struct { - map_type uint32 // since 4.12 1e2709769086 - id uint32 - key_size uint32 - value_size uint32 - max_entries uint32 - map_flags uint32 - name internal.BPFObjName // since 4.15 ad5b177bd73f - ifindex uint32 // since 4.16 52775b33bb50 - btf_vmlinux_value_type_id uint32 // since 5.6 85d33df357b6 - netns_dev uint64 // since 4.16 52775b33bb50 - netns_ino uint64 - btf_id uint32 // since 4.18 78958fca7ead - btf_key_type_id uint32 // since 4.18 9b2cf328b2ec - btf_value_type_id uint32 -} - -type bpfProgLoadAttr struct { - progType ProgramType - insCount uint32 - instructions internal.Pointer - license internal.Pointer - logLevel uint32 - logSize uint32 - logBuf internal.Pointer - kernelVersion uint32 // since 4.1 2541517c32be - progFlags uint32 // since 4.11 e07b98d9bffe - progName internal.BPFObjName // since 4.15 067cae47771c - progIfIndex uint32 // since 4.15 1f6f4cb7ba21 - expectedAttachType AttachType // since 4.17 5e43f899b03a - progBTFFd uint32 - funcInfoRecSize uint32 - funcInfo internal.Pointer - funcInfoCnt uint32 - lineInfoRecSize uint32 - lineInfo internal.Pointer - lineInfoCnt uint32 - attachBTFID btf.TypeID - attachProgFd uint32 -} - -type bpfProgInfo struct { - prog_type uint32 - id uint32 - tag [unix.BPF_TAG_SIZE]byte - jited_prog_len uint32 - xlated_prog_len uint32 - jited_prog_insns internal.Pointer - xlated_prog_insns internal.Pointer - load_time uint64 // since 4.15 cb4d2b3f03d8 - created_by_uid uint32 - nr_map_ids uint32 - map_ids internal.Pointer - name internal.BPFObjName // since 4.15 067cae47771c - ifindex uint32 - gpl_compatible uint32 - netns_dev uint64 - netns_ino uint64 - nr_jited_ksyms uint32 - nr_jited_func_lens uint32 - jited_ksyms internal.Pointer - jited_func_lens internal.Pointer - btf_id uint32 - func_info_rec_size uint32 - func_info internal.Pointer - nr_func_info uint32 - nr_line_info uint32 - line_info internal.Pointer - jited_line_info internal.Pointer - nr_jited_line_info uint32 - line_info_rec_size uint32 - jited_line_info_rec_size uint32 - nr_prog_tags uint32 - prog_tags internal.Pointer - run_time_ns uint64 - run_cnt uint64 -} - -type bpfProgTestRunAttr struct { - fd uint32 - retval uint32 - dataSizeIn uint32 - dataSizeOut uint32 - dataIn internal.Pointer - dataOut internal.Pointer - repeat uint32 - duration uint32 -} - -type bpfGetFDByIDAttr struct { - id uint32 - next uint32 -} - -type bpfMapFreezeAttr struct { - mapFd uint32 -} - -type bpfObjGetNextIDAttr struct { - startID uint32 - nextID uint32 - openFlags uint32 -} - -func bpfProgLoad(attr *bpfProgLoadAttr) (*internal.FD, error) { - for { - fd, err := internal.BPF(internal.BPF_PROG_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - // As of ~4.20 the verifier can be interrupted by a signal, - // and returns EAGAIN in that case. 
- if errors.Is(err, unix.EAGAIN) { - continue - } - - if err != nil { - return nil, err - } - - return internal.NewFD(uint32(fd)), nil +func progLoad(insns asm.Instructions, typ ProgramType, license string) (*sys.FD, error) { + buf := bytes.NewBuffer(make([]byte, 0, insns.Size())) + if err := insns.Marshal(buf, internal.NativeEndian); err != nil { + return nil, err } -} + bytecode := buf.Bytes() -func bpfProgTestRun(attr *bpfProgTestRunAttr) error { - _, err := internal.BPF(internal.BPF_PROG_TEST_RUN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - return err + return sys.ProgLoad(&sys.ProgLoadAttr{ + ProgType: sys.ProgType(typ), + License: sys.NewStringPointer(license), + Insns: sys.NewSlicePointer(bytecode), + InsnCnt: uint32(len(bytecode) / asm.InstructionSize), + }) } var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() error { - _, err := internal.BPFMapCreate(&internal.BPFMapCreateAttr{ - MapType: uint32(ArrayOfMaps), + _, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(ArrayOfMaps), KeySize: 4, ValueSize: 4, MaxEntries: 1, @@ -203,12 +68,12 @@ var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() error { var haveMapMutabilityModifiers = internal.FeatureTest("read- and write-only maps", "5.2", func() error { // This checks BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG. Since // BPF_MAP_FREEZE appeared in 5.2 as well we don't do a separate check. - m, err := internal.BPFMapCreate(&internal.BPFMapCreateAttr{ - MapType: uint32(Array), + m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Array), KeySize: 4, ValueSize: 4, MaxEntries: 1, - Flags: unix.BPF_F_RDONLY_PROG, + MapFlags: unix.BPF_F_RDONLY_PROG, }) if err != nil { return internal.ErrNotSupported @@ -219,12 +84,12 @@ var haveMapMutabilityModifiers = internal.FeatureTest("read- and write-only maps var haveMmapableMaps = internal.FeatureTest("mmapable maps", "5.5", func() error { // This checks BPF_F_MMAPABLE, which appeared in 5.5 for array maps. - m, err := internal.BPFMapCreate(&internal.BPFMapCreateAttr{ - MapType: uint32(Array), + m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Array), KeySize: 4, ValueSize: 4, MaxEntries: 1, - Flags: unix.BPF_F_MMAPABLE, + MapFlags: unix.BPF_F_MMAPABLE, }) if err != nil { return internal.ErrNotSupported @@ -235,12 +100,12 @@ var haveMmapableMaps = internal.FeatureTest("mmapable maps", "5.5", func() error var haveInnerMaps = internal.FeatureTest("inner maps", "5.10", func() error { // This checks BPF_F_INNER_MAP, which appeared in 5.10. 
- m, err := internal.BPFMapCreate(&internal.BPFMapCreateAttr{ - MapType: uint32(Array), + m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Array), KeySize: 4, ValueSize: 4, MaxEntries: 1, - Flags: unix.BPF_F_INNER_MAP, + MapFlags: unix.BPF_F_INNER_MAP, }) if err != nil { return internal.ErrNotSupported @@ -249,111 +114,21 @@ var haveInnerMaps = internal.FeatureTest("inner maps", "5.10", func() error { return nil }) -func bpfMapLookupElem(m *internal.FD, key, valueOut internal.Pointer) error { - fd, err := m.Value() - if err != nil { - return err - } - - attr := bpfMapOpAttr{ - mapFd: fd, - key: key, - value: valueOut, - } - _, err = internal.BPF(internal.BPF_MAP_LOOKUP_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return wrapMapError(err) -} - -func bpfMapLookupAndDelete(m *internal.FD, key, valueOut internal.Pointer) error { - fd, err := m.Value() - if err != nil { - return err - } - - attr := bpfMapOpAttr{ - mapFd: fd, - key: key, - value: valueOut, - } - _, err = internal.BPF(internal.BPF_MAP_LOOKUP_AND_DELETE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return wrapMapError(err) -} - -func bpfMapUpdateElem(m *internal.FD, key, valueOut internal.Pointer, flags uint64) error { - fd, err := m.Value() - if err != nil { - return err - } - - attr := bpfMapOpAttr{ - mapFd: fd, - key: key, - value: valueOut, - flags: flags, - } - _, err = internal.BPF(internal.BPF_MAP_UPDATE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return wrapMapError(err) -} - -func bpfMapDeleteElem(m *internal.FD, key internal.Pointer) error { - fd, err := m.Value() - if err != nil { - return err - } - - attr := bpfMapOpAttr{ - mapFd: fd, - key: key, - } - _, err = internal.BPF(internal.BPF_MAP_DELETE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return wrapMapError(err) -} - -func bpfMapGetNextKey(m *internal.FD, key, nextKeyOut internal.Pointer) error { - fd, err := m.Value() - if err != nil { - return err - } - - attr := bpfMapOpAttr{ - mapFd: fd, - key: key, - value: nextKeyOut, - } - _, err = internal.BPF(internal.BPF_MAP_GET_NEXT_KEY, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return wrapMapError(err) -} - -func objGetNextID(cmd internal.BPFCmd, start uint32) (uint32, error) { - attr := bpfObjGetNextIDAttr{ - startID: start, - } - _, err := internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return attr.nextID, err -} - -func bpfMapBatch(cmd internal.BPFCmd, m *internal.FD, inBatch, outBatch, keys, values internal.Pointer, count uint32, opts *BatchOptions) (uint32, error) { - fd, err := m.Value() +var haveNoPreallocMaps = internal.FeatureTest("prealloc maps", "4.6", func() error { + // This checks BPF_F_NO_PREALLOC, which appeared in 4.6. + m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Hash), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapFlags: unix.BPF_F_NO_PREALLOC, + }) if err != nil { - return 0, err - } - - attr := bpfBatchMapOpAttr{ - inBatch: inBatch, - outBatch: outBatch, - keys: keys, - values: values, - count: count, - mapFd: fd, - } - if opts != nil { - attr.elemFlags = opts.ElemFlags - attr.flags = opts.Flags + return internal.ErrNotSupported } - _, err = internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - // always return count even on an error, as things like update might partially be fulfilled. 
- return attr.count, wrapMapError(err) -} + _ = m.Close() + return nil +}) func wrapMapError(err error) error { if err == nil { @@ -361,60 +136,34 @@ func wrapMapError(err error) error { } if errors.Is(err, unix.ENOENT) { - return internal.SyscallError(ErrKeyNotExist, unix.ENOENT) + return sys.Error(ErrKeyNotExist, unix.ENOENT) } if errors.Is(err, unix.EEXIST) { - return internal.SyscallError(ErrKeyExist, unix.EEXIST) + return sys.Error(ErrKeyExist, unix.EEXIST) } if errors.Is(err, unix.ENOTSUPP) { - return internal.SyscallError(ErrNotSupported, unix.ENOTSUPP) + return sys.Error(ErrNotSupported, unix.ENOTSUPP) } - return err -} - -func bpfMapFreeze(m *internal.FD) error { - fd, err := m.Value() - if err != nil { - return err + if errors.Is(err, unix.E2BIG) { + return fmt.Errorf("key too big for map: %w", err) } - attr := bpfMapFreezeAttr{ - mapFd: fd, - } - _, err = internal.BPF(internal.BPF_MAP_FREEZE, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) return err } -func bpfGetProgInfoByFD(fd *internal.FD) (*bpfProgInfo, error) { - var info bpfProgInfo - if err := internal.BPFObjGetInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)); err != nil { - return nil, fmt.Errorf("can't get program info: %w", err) - } - return &info, nil -} - -func bpfGetMapInfoByFD(fd *internal.FD) (*bpfMapInfo, error) { - var info bpfMapInfo - err := internal.BPFObjGetInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)) - if err != nil { - return nil, fmt.Errorf("can't get map info: %w", err) - } - return &info, nil -} - var haveObjName = internal.FeatureTest("object names", "4.15", func() error { - attr := internal.BPFMapCreateAttr{ - MapType: uint32(Array), + attr := sys.MapCreateAttr{ + MapType: sys.MapType(Array), KeySize: 4, ValueSize: 4, MaxEntries: 1, - MapName: internal.NewBPFObjName("feature_test"), + MapName: sys.NewObjName("feature_test"), } - fd, err := internal.BPFMapCreate(&attr) + fd, err := sys.MapCreate(&attr) if err != nil { return internal.ErrNotSupported } @@ -428,15 +177,15 @@ var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func() return err } - attr := internal.BPFMapCreateAttr{ - MapType: uint32(Array), + attr := sys.MapCreateAttr{ + MapType: sys.MapType(Array), KeySize: 4, ValueSize: 4, MaxEntries: 1, - MapName: internal.NewBPFObjName(".test"), + MapName: sys.NewObjName(".test"), } - fd, err := internal.BPFMapCreate(&attr) + fd, err := sys.MapCreate(&attr) if err != nil { return internal.ErrNotSupported } @@ -447,34 +196,69 @@ var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func() var haveBatchAPI = internal.FeatureTest("map batch api", "5.6", func() error { var maxEntries uint32 = 2 - attr := internal.BPFMapCreateAttr{ - MapType: uint32(Hash), + attr := sys.MapCreateAttr{ + MapType: sys.MapType(Hash), KeySize: 4, ValueSize: 4, MaxEntries: maxEntries, } - fd, err := internal.BPFMapCreate(&attr) + fd, err := sys.MapCreate(&attr) if err != nil { return internal.ErrNotSupported } defer fd.Close() + keys := []uint32{1, 2} values := []uint32{3, 4} kp, _ := marshalPtr(keys, 8) vp, _ := marshalPtr(values, 8) - nilPtr := internal.NewPointer(nil) - _, err = bpfMapBatch(internal.BPF_MAP_UPDATE_BATCH, fd, nilPtr, nilPtr, kp, vp, maxEntries, nil) + + err = sys.MapUpdateBatch(&sys.MapUpdateBatchAttr{ + MapFd: fd.Uint(), + Keys: kp, + Values: vp, + Count: maxEntries, + }) if err != nil { return internal.ErrNotSupported } return nil }) -func bpfObjGetFDByID(cmd internal.BPFCmd, id uint32) (*internal.FD, error) { - attr := bpfGetFDByIDAttr{ - id: id, 
+var haveProbeReadKernel = internal.FeatureTest("bpf_probe_read_kernel", "5.5", func() error { + insns := asm.Instructions{ + asm.Mov.Reg(asm.R1, asm.R10), + asm.Add.Imm(asm.R1, -8), + asm.Mov.Imm(asm.R2, 8), + asm.Mov.Imm(asm.R3, 0), + asm.FnProbeReadKernel.Call(), + asm.Return(), } - ptr, err := internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return internal.NewFD(uint32(ptr)), err -} + + fd, err := progLoad(insns, Kprobe, "GPL") + if err != nil { + return internal.ErrNotSupported + } + _ = fd.Close() + return nil +}) + +var haveBPFToBPFCalls = internal.FeatureTest("bpf2bpf calls", "4.16", func() error { + insns := asm.Instructions{ + asm.Call.Label("prog2").WithSymbol("prog1"), + asm.Return(), + asm.Mov.Imm(asm.R0, 0).WithSymbol("prog2"), + asm.Return(), + } + + fd, err := progLoad(insns, SocketFilter, "MIT") + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + if err != nil { + return err + } + _ = fd.Close() + return nil +}) diff --git a/agent/vendor/github.com/cilium/ebpf/types.go b/agent/vendor/github.com/cilium/ebpf/types.go index 441a82fe4c1..a27b4424745 100644 --- a/agent/vendor/github.com/cilium/ebpf/types.go +++ b/agent/vendor/github.com/cilium/ebpf/types.go @@ -4,12 +4,17 @@ import ( "github.com/cilium/ebpf/internal/unix" ) -//go:generate stringer -output types_string.go -type=MapType,ProgramType,AttachType,PinType +//go:generate stringer -output types_string.go -type=MapType,ProgramType,PinType // MapType indicates the type map structure // that will be initialized in the kernel. type MapType uint32 +// Max returns the latest supported MapType. +func (MapType) Max() MapType { + return maxMapType - 1 +} + // All the various map types that can be created const ( UnspecifiedMap MapType = iota @@ -85,15 +90,22 @@ const ( SkStorage // DevMapHash - Hash-based indexing scheme for references to network devices. DevMapHash - StructOpts + // StructOpsMap - This map holds a kernel struct with its function pointer implemented in a BPF + // program. + StructOpsMap + // RingBuf - Similar to PerfEventArray, but shared across all CPUs. RingBuf + // InodeStorage - Specialized local storage map for inodes. InodeStorage + // TaskStorage - Specialized local storage map for task_struct. TaskStorage + // maxMapType - Bound enum of MapTypes, has to be last in enum. + maxMapType ) // hasPerCPUValue returns true if the Map stores a value per CPU. func (mt MapType) hasPerCPUValue() bool { - return mt == PerCPUHash || mt == PerCPUArray || mt == LRUCPUHash + return mt == PerCPUHash || mt == PerCPUArray || mt == LRUCPUHash || mt == PerCPUCGroupStorage } // canStoreMap returns true if the map type accepts a map fd @@ -108,9 +120,25 @@ func (mt MapType) canStoreProgram() bool { return mt == ProgramArray } +// hasBTF returns true if the map type supports BTF key/value metadata. +func (mt MapType) hasBTF() bool { + switch mt { + case PerfEventArray, CGroupArray, StackTrace, ArrayOfMaps, HashOfMaps, DevMap, + DevMapHash, CPUMap, XSKMap, SockMap, SockHash, Queue, Stack, RingBuf: + return false + default: + return true + } +} + // ProgramType of the eBPF program type ProgramType uint32 +// Max return the latest supported ProgramType. 
+func (ProgramType) Max() ProgramType { + return maxProgramType - 1 +} + // eBPF program types const ( UnspecifiedProgram ProgramType = iota @@ -144,6 +172,8 @@ const ( Extension LSM SkLookup + Syscall + maxProgramType ) // AttachType of the eBPF program, needed to differentiate allowed context accesses in @@ -151,6 +181,8 @@ const ( // Will cause invalid argument (EINVAL) at program load time if set incorrectly. type AttachType uint32 +//go:generate stringer -type AttachType -trimprefix Attach + // AttachNone is an alias for AttachCGroupInetIngress for readability reasons. const AttachNone AttachType = 0 @@ -193,6 +225,10 @@ const ( AttachXDPCPUMap AttachSkLookup AttachXDP + AttachSkSKBVerdict + AttachSkReuseportSelect + AttachSkReuseportSelectOrMigrate + AttachPerfEvent ) // AttachFlags of the eBPF program used in BPF_PROG_ATTACH command diff --git a/agent/vendor/github.com/cilium/ebpf/types_string.go b/agent/vendor/github.com/cilium/ebpf/types_string.go index c25f7656479..e80b948b096 100644 --- a/agent/vendor/github.com/cilium/ebpf/types_string.go +++ b/agent/vendor/github.com/cilium/ebpf/types_string.go @@ -1,4 +1,4 @@ -// Code generated by "stringer -output types_string.go -type=MapType,ProgramType,AttachType,PinType"; DO NOT EDIT. +// Code generated by "stringer -output types_string.go -type=MapType,ProgramType,PinType"; DO NOT EDIT. package ebpf @@ -34,15 +34,16 @@ func _() { _ = x[Stack-23] _ = x[SkStorage-24] _ = x[DevMapHash-25] - _ = x[StructOpts-26] + _ = x[StructOpsMap-26] _ = x[RingBuf-27] _ = x[InodeStorage-28] _ = x[TaskStorage-29] + _ = x[maxMapType-30] } -const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHashStructOptsRingBufInodeStorageTaskStorage" +const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHashStructOpsMapRingBufInodeStorageTaskStoragemaxMapType" -var _MapType_index = [...]uint16{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248, 258, 265, 277, 288} +var _MapType_index = [...]uint16{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248, 260, 267, 279, 290, 300} func (i MapType) String() string { if i >= MapType(len(_MapType_index)-1) { @@ -85,11 +86,13 @@ func _() { _ = x[Extension-28] _ = x[LSM-29] _ = x[SkLookup-30] + _ = x[Syscall-31] + _ = x[maxProgramType-32] } -const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookup" +const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscallmaxProgramType" -var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 
93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294} +var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301, 315} func (i ProgramType) String() string { if i >= ProgramType(len(_ProgramType_index)-1) { @@ -97,61 +100,6 @@ func (i ProgramType) String() string { } return _ProgramType_name[_ProgramType_index[i]:_ProgramType_index[i+1]] } -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[AttachNone-0] - _ = x[AttachCGroupInetIngress-0] - _ = x[AttachCGroupInetEgress-1] - _ = x[AttachCGroupInetSockCreate-2] - _ = x[AttachCGroupSockOps-3] - _ = x[AttachSkSKBStreamParser-4] - _ = x[AttachSkSKBStreamVerdict-5] - _ = x[AttachCGroupDevice-6] - _ = x[AttachSkMsgVerdict-7] - _ = x[AttachCGroupInet4Bind-8] - _ = x[AttachCGroupInet6Bind-9] - _ = x[AttachCGroupInet4Connect-10] - _ = x[AttachCGroupInet6Connect-11] - _ = x[AttachCGroupInet4PostBind-12] - _ = x[AttachCGroupInet6PostBind-13] - _ = x[AttachCGroupUDP4Sendmsg-14] - _ = x[AttachCGroupUDP6Sendmsg-15] - _ = x[AttachLircMode2-16] - _ = x[AttachFlowDissector-17] - _ = x[AttachCGroupSysctl-18] - _ = x[AttachCGroupUDP4Recvmsg-19] - _ = x[AttachCGroupUDP6Recvmsg-20] - _ = x[AttachCGroupGetsockopt-21] - _ = x[AttachCGroupSetsockopt-22] - _ = x[AttachTraceRawTp-23] - _ = x[AttachTraceFEntry-24] - _ = x[AttachTraceFExit-25] - _ = x[AttachModifyReturn-26] - _ = x[AttachLSMMac-27] - _ = x[AttachTraceIter-28] - _ = x[AttachCgroupInet4GetPeername-29] - _ = x[AttachCgroupInet6GetPeername-30] - _ = x[AttachCgroupInet4GetSockname-31] - _ = x[AttachCgroupInet6GetSockname-32] - _ = x[AttachXDPDevMap-33] - _ = x[AttachCgroupInetSockRelease-34] - _ = x[AttachXDPCPUMap-35] - _ = x[AttachSkLookup-36] - _ = x[AttachXDP-37] -} - -const _AttachType_name = "AttachNoneAttachCGroupInetEgressAttachCGroupInetSockCreateAttachCGroupSockOpsAttachSkSKBStreamParserAttachSkSKBStreamVerdictAttachCGroupDeviceAttachSkMsgVerdictAttachCGroupInet4BindAttachCGroupInet6BindAttachCGroupInet4ConnectAttachCGroupInet6ConnectAttachCGroupInet4PostBindAttachCGroupInet6PostBindAttachCGroupUDP4SendmsgAttachCGroupUDP6SendmsgAttachLircMode2AttachFlowDissectorAttachCGroupSysctlAttachCGroupUDP4RecvmsgAttachCGroupUDP6RecvmsgAttachCGroupGetsockoptAttachCGroupSetsockoptAttachTraceRawTpAttachTraceFEntryAttachTraceFExitAttachModifyReturnAttachLSMMacAttachTraceIterAttachCgroupInet4GetPeernameAttachCgroupInet6GetPeernameAttachCgroupInet4GetSocknameAttachCgroupInet6GetSocknameAttachXDPDevMapAttachCgroupInetSockReleaseAttachXDPCPUMapAttachSkLookupAttachXDP" - -var _AttachType_index = [...]uint16{0, 10, 32, 58, 77, 100, 124, 142, 160, 181, 202, 226, 250, 275, 300, 323, 346, 361, 380, 398, 421, 444, 466, 488, 504, 521, 537, 555, 567, 582, 610, 638, 666, 694, 709, 736, 751, 765, 774} - -func (i AttachType) String() string { - if i >= AttachType(len(_AttachType_index)-1) { - return "AttachType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _AttachType_name[_AttachType_index[i]:_AttachType_index[i+1]] -} func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. 
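The cilium/ebpf hunks above replace the hand-rolled bpf(2) wrappers in syscalls.go with the generated sys package, add feature probes for bpf_probe_read_kernel and bpf-to-bpf calls, and bound the MapType/ProgramType enums with Max() helpers. As a rough sketch of what the new Max() bound gives callers of the public package (illustration only, not part of the patch; it uses only identifiers visible in the hunks above), one might enumerate every known map type like this:

    package main

    import (
        "fmt"

        "github.com/cilium/ebpf"
    )

    func main() {
        // Walk the MapType enum up to the new Max() bound and print the
        // stringer name for each value.
        for mt := ebpf.UnspecifiedMap; mt <= ebpf.MapType(0).Max(); mt++ {
            fmt.Printf("%d\t%s\n", uint32(mt), mt)
        }
    }

ProgramType.Max() follows the same pattern, so the equivalent loop works for program types.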
diff --git a/agent/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go b/agent/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go index cff5af1a64c..147f756fe24 100644 --- a/agent/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go +++ b/agent/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go @@ -176,6 +176,11 @@ func (c *Conn) Close() { c.sigconn.Close() } +// Connected returns whether conn is connected +func (c *Conn) Connected() bool { + return c.sysconn.Connected() && c.sigconn.Connected() +} + // NewConnection establishes a connection to a bus using a caller-supplied function. // This allows connecting to remote buses through a user-supplied mechanism. // The supplied function may be called multiple times, and should return independent connections. diff --git a/agent/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go b/agent/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go index fa04afc708e..074148cb4d6 100644 --- a/agent/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go +++ b/agent/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go @@ -417,6 +417,29 @@ func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) { return status, nil } +// GetUnitByPID returns the unit object path of the unit a process ID +// belongs to. It takes a UNIX PID and returns the object path. The PID must +// refer to an existing system process +func (c *Conn) GetUnitByPID(ctx context.Context, pid uint32) (dbus.ObjectPath, error) { + var result dbus.ObjectPath + + err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.GetUnitByPID", 0, pid).Store(&result) + + return result, err +} + +// GetUnitNameByPID returns the name of the unit a process ID belongs to. It +// takes a UNIX PID and returns the object path. The PID must refer to an +// existing system process +func (c *Conn) GetUnitNameByPID(ctx context.Context, pid uint32) (string, error) { + path, err := c.GetUnitByPID(ctx, pid) + if err != nil { + return "", err + } + + return unitName(path), nil +} + // Deprecated: use ListUnitsContext instead. func (c *Conn) ListUnits() ([]UnitStatus, error) { return c.ListUnitsContext(context.Background()) @@ -828,3 +851,14 @@ func (c *Conn) listJobsInternal(ctx context.Context) ([]JobStatus, error) { return status, nil } + +// Freeze the cgroup associated with the unit. +// Note that FreezeUnit and ThawUnit are only supported on systems running with cgroup v2. +func (c *Conn) FreezeUnit(ctx context.Context, unit string) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.FreezeUnit", 0, unit).Store() +} + +// Unfreeze the cgroup associated with the unit. 
+func (c *Conn) ThawUnit(ctx context.Context, unit string) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ThawUnit", 0, unit).Store() +} diff --git a/agent/vendor/github.com/godbus/dbus/v5/auth.go b/agent/vendor/github.com/godbus/dbus/v5/auth.go index a59b4c0eb76..0f3b252c070 100644 --- a/agent/vendor/github.com/godbus/dbus/v5/auth.go +++ b/agent/vendor/github.com/godbus/dbus/v5/auth.go @@ -176,9 +176,10 @@ func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, boo return err, false } state = waitingForReject + } else { + conn.uuid = string(s[1]) + return nil, true } - conn.uuid = string(s[1]) - return nil, true case state == waitingForData: err = authWriteLine(conn.transport, []byte("ERROR")) if err != nil { @@ -191,9 +192,10 @@ func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, boo return err, false } state = waitingForReject + } else { + conn.uuid = string(s[1]) + return nil, true } - conn.uuid = string(s[1]) - return nil, true case state == waitingForOk && string(s[0]) == "DATA": err = authWriteLine(conn.transport, []byte("DATA")) if err != nil { diff --git a/agent/vendor/github.com/godbus/dbus/v5/conn.go b/agent/vendor/github.com/godbus/dbus/v5/conn.go index 76fc5cde3d2..69978ea26ab 100644 --- a/agent/vendor/github.com/godbus/dbus/v5/conn.go +++ b/agent/vendor/github.com/godbus/dbus/v5/conn.go @@ -169,7 +169,7 @@ func Connect(address string, opts ...ConnOption) (*Conn, error) { // SystemBusPrivate returns a new private connection to the system bus. // Note: this connection is not ready to use. One must perform Auth and Hello -// on the connection before it is useable. +// on the connection before it is usable. func SystemBusPrivate(opts ...ConnOption) (*Conn, error) { return Dial(getSystemBusPlatformAddress(), opts...) } @@ -284,10 +284,6 @@ func newConn(tr transport, opts ...ConnOption) (*Conn, error) { conn.ctx = context.Background() } conn.ctx, conn.cancelCtx = context.WithCancel(conn.ctx) - go func() { - <-conn.ctx.Done() - conn.Close() - }() conn.calls = newCallTracker() if conn.handler == nil { @@ -302,6 +298,11 @@ func newConn(tr transport, opts ...ConnOption) (*Conn, error) { conn.outHandler = &outputHandler{conn: conn} conn.names = newNameTracker() conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus") + + go func() { + <-conn.ctx.Done() + conn.Close() + }() return conn, nil } @@ -550,6 +551,11 @@ func (conn *Conn) send(ctx context.Context, msg *Message, ch chan *Call) *Call { call.ctx = ctx call.ctxCanceler = canceler conn.calls.track(msg.serial, call) + if ctx.Err() != nil { + // short path: don't even send the message if context already cancelled + conn.calls.handleSendError(msg, ctx.Err()) + return call + } go func() { <-ctx.Done() conn.calls.handleSendError(msg, ctx.Err()) @@ -649,7 +655,9 @@ func (conn *Conn) RemoveMatchSignalContext(ctx context.Context, options ...Match // Signal registers the given channel to be passed all received signal messages. // -// Multiple of these channels can be registered at the same time. +// Multiple of these channels can be registered at the same time. The channel is +// closed if the Conn is closed; it should not be closed by the caller before +// RemoveSignal was called on it. 
// // These channels are "overwritten" by Eavesdrop; i.e., if there currently is a // channel for eavesdropped messages, this channel receives all signals, and @@ -765,7 +773,12 @@ func getKey(s, key string) string { for _, keyEqualsValue := range strings.Split(s, ",") { keyValue := strings.SplitN(keyEqualsValue, "=", 2) if len(keyValue) == 2 && keyValue[0] == key { - return keyValue[1] + val, err := UnescapeBusAddressValue(keyValue[1]) + if err != nil { + // No way to return an error. + return "" + } + return val } } return "" diff --git a/agent/vendor/github.com/godbus/dbus/v5/conn_other.go b/agent/vendor/github.com/godbus/dbus/v5/conn_other.go index 616dcf66449..90289ca85a2 100644 --- a/agent/vendor/github.com/godbus/dbus/v5/conn_other.go +++ b/agent/vendor/github.com/godbus/dbus/v5/conn_other.go @@ -54,7 +54,7 @@ func tryDiscoverDbusSessionBusAddress() string { if runUserBusFile := path.Join(runtimeDirectory, "bus"); fileExists(runUserBusFile) { // if /run/user//bus exists, that file itself // *is* the unix socket, so return its path - return fmt.Sprintf("unix:path=%s", runUserBusFile) + return fmt.Sprintf("unix:path=%s", EscapeBusAddressValue(runUserBusFile)) } if runUserSessionDbusFile := path.Join(runtimeDirectory, "dbus-session"); fileExists(runUserSessionDbusFile) { // if /run/user//dbus-session exists, it's a @@ -85,9 +85,6 @@ func getRuntimeDirectory() (string, error) { } func fileExists(filename string) bool { - if _, err := os.Stat(filename); !os.IsNotExist(err) { - return true - } else { - return false - } + _, err := os.Stat(filename) + return !os.IsNotExist(err) } diff --git a/agent/vendor/github.com/godbus/dbus/v5/dbus.go b/agent/vendor/github.com/godbus/dbus/v5/dbus.go index ddf3b7afde9..c188d104854 100644 --- a/agent/vendor/github.com/godbus/dbus/v5/dbus.go +++ b/agent/vendor/github.com/godbus/dbus/v5/dbus.go @@ -122,8 +122,11 @@ func isConvertibleTo(dest, src reflect.Type) bool { case dest.Kind() == reflect.Slice: return src.Kind() == reflect.Slice && isConvertibleTo(dest.Elem(), src.Elem()) + case dest.Kind() == reflect.Ptr: + dest = dest.Elem() + return isConvertibleTo(dest, src) case dest.Kind() == reflect.Struct: - return src == interfacesType + return src == interfacesType || dest.Kind() == src.Kind() default: return src.ConvertibleTo(dest) } @@ -274,13 +277,8 @@ func storeSliceIntoInterface(dest, src reflect.Value) error { func storeSliceIntoSlice(dest, src reflect.Value) error { if dest.IsNil() || dest.Len() < src.Len() { dest.Set(reflect.MakeSlice(dest.Type(), src.Len(), src.Cap())) - } - if dest.Len() != src.Len() { - return fmt.Errorf( - "dbus.Store: type mismatch: "+ - "slices are different lengths "+ - "need: %d have: %d", - src.Len(), dest.Len()) + } else if dest.Len() > src.Len() { + dest.Set(dest.Slice(0, src.Len())) } for i := 0; i < src.Len(); i++ { err := store(dest.Index(i), getVariantValue(src.Index(i))) diff --git a/agent/vendor/github.com/godbus/dbus/v5/doc.go b/agent/vendor/github.com/godbus/dbus/v5/doc.go index ade1df951cd..8f25a00d61e 100644 --- a/agent/vendor/github.com/godbus/dbus/v5/doc.go +++ b/agent/vendor/github.com/godbus/dbus/v5/doc.go @@ -10,8 +10,10 @@ value. Conversion Rules For outgoing messages, Go types are automatically converted to the -corresponding D-Bus types. The following types are directly encoded as their -respective D-Bus equivalents: +corresponding D-Bus types. See the official specification at +https://dbus.freedesktop.org/doc/dbus-specification.html#type-system for more +information on the D-Bus type system. 
The following types are directly encoded +as their respective D-Bus equivalents: Go type | D-Bus type ------------+----------- @@ -39,8 +41,8 @@ Maps encode as DICTs, provided that their key type can be used as a key for a DICT. Structs other than Variant and Signature encode as a STRUCT containing their -exported fields. Fields whose tags contain `dbus:"-"` and unexported fields will -be skipped. +exported fields in order. Fields whose tags contain `dbus:"-"` and unexported +fields will be skipped. Pointers encode as the value they're pointed to. diff --git a/agent/vendor/github.com/godbus/dbus/v5/escape.go b/agent/vendor/github.com/godbus/dbus/v5/escape.go new file mode 100644 index 00000000000..d1509d94581 --- /dev/null +++ b/agent/vendor/github.com/godbus/dbus/v5/escape.go @@ -0,0 +1,84 @@ +package dbus + +import "net/url" + +// EscapeBusAddressValue implements a requirement to escape the values +// in D-Bus server addresses, as defined by the D-Bus specification at +// https://dbus.freedesktop.org/doc/dbus-specification.html#addresses. +func EscapeBusAddressValue(val string) string { + toEsc := strNeedsEscape(val) + if toEsc == 0 { + // Avoid unneeded allocation/copying. + return val + } + + // Avoid allocation for short paths. + var buf [64]byte + var out []byte + // Every to-be-escaped byte needs 2 extra bytes. + required := len(val) + 2*toEsc + if required <= len(buf) { + out = buf[:required] + } else { + out = make([]byte, required) + } + + j := 0 + for i := 0; i < len(val); i++ { + if ch := val[i]; needsEscape(ch) { + // Convert ch to %xx, where xx is hex value. + out[j] = '%' + out[j+1] = hexchar(ch >> 4) + out[j+2] = hexchar(ch & 0x0F) + j += 3 + } else { + out[j] = ch + j++ + } + } + + return string(out) +} + +// UnescapeBusAddressValue unescapes values in D-Bus server addresses, +// as defined by the D-Bus specification at +// https://dbus.freedesktop.org/doc/dbus-specification.html#addresses. +func UnescapeBusAddressValue(val string) (string, error) { + // Looks like url.PathUnescape does exactly what is required. + return url.PathUnescape(val) +} + +// hexchar returns an octal representation of a n, where n < 16. +// For invalid values of n, the function panics. +func hexchar(n byte) byte { + const hex = "0123456789abcdef" + + // For n >= len(hex), runtime will panic. + return hex[n] +} + +// needsEscape tells if a byte is NOT one of optionally-escaped bytes. +func needsEscape(c byte) bool { + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + return false + } + switch c { + case '-', '_', '/', '\\', '.', '*': + return false + } + + return true +} + +// strNeedsEscape tells how many bytes in the string need escaping. 
+func strNeedsEscape(val string) int { + count := 0 + + for i := 0; i < len(val); i++ { + if needsEscape(val[i]) { + count++ + } + } + + return count +} diff --git a/agent/vendor/github.com/godbus/dbus/v5/export.go b/agent/vendor/github.com/godbus/dbus/v5/export.go index 522334715b6..d3dd9f7cd62 100644 --- a/agent/vendor/github.com/godbus/dbus/v5/export.go +++ b/agent/vendor/github.com/godbus/dbus/v5/export.go @@ -3,6 +3,7 @@ package dbus import ( "errors" "fmt" + "os" "reflect" "strings" ) @@ -209,28 +210,23 @@ func (conn *Conn) handleCall(msg *Message) { } reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...)) - conn.sendMessageAndIfClosed(reply, nil) + if err := reply.IsValid(); err != nil { + fmt.Fprintf(os.Stderr, "dbus: dropping invalid reply to %s.%s on obj %s: %s\n", ifaceName, name, path, err) + } else { + conn.sendMessageAndIfClosed(reply, nil) + } } } // Emit emits the given signal on the message bus. The name parameter must be // formatted as "interface.member", e.g., "org.freedesktop.DBus.NameLost". func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) error { - if !path.IsValid() { - return errors.New("dbus: invalid object path") - } i := strings.LastIndex(name, ".") if i == -1 { return errors.New("dbus: invalid method name") } iface := name[:i] member := name[i+1:] - if !isValidMember(member) { - return errors.New("dbus: invalid method name") - } - if !isValidInterface(iface) { - return errors.New("dbus: invalid interface name") - } msg := new(Message) msg.Type = TypeSignal msg.Headers = make(map[HeaderField]Variant) @@ -241,6 +237,9 @@ func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) erro if len(values) > 0 { msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...)) } + if err := msg.IsValid(); err != nil { + return err + } var closed bool conn.sendMessageAndIfClosed(msg, func() { diff --git a/agent/vendor/github.com/godbus/dbus/v5/homedir.go b/agent/vendor/github.com/godbus/dbus/v5/homedir.go index 0b745f9313a..c44d9b5fc24 100644 --- a/agent/vendor/github.com/godbus/dbus/v5/homedir.go +++ b/agent/vendor/github.com/godbus/dbus/v5/homedir.go @@ -2,27 +2,24 @@ package dbus import ( "os" - "sync" -) - -var ( - homeDir string - homeDirLock sync.Mutex + "os/user" ) +// Get returns the home directory of the current user, which is usually the +// value of HOME environment variable. In case it is not set or empty, os/user +// package is used. +// +// If linking statically with cgo enabled against glibc, make sure the +// osusergo build tag is used. +// +// If needing to do nss lookups, do not disable cgo or set osusergo. 
func getHomeDir() string { - homeDirLock.Lock() - defer homeDirLock.Unlock() - + homeDir := os.Getenv("HOME") if homeDir != "" { return homeDir } - - homeDir = os.Getenv("HOME") - if homeDir != "" { - return homeDir + if u, err := user.Current(); err == nil { + return u.HomeDir } - - homeDir = lookupHomeDir() - return homeDir + return "/" } diff --git a/agent/vendor/github.com/godbus/dbus/v5/homedir_dynamic.go b/agent/vendor/github.com/godbus/dbus/v5/homedir_dynamic.go deleted file mode 100644 index 2732081e73b..00000000000 --- a/agent/vendor/github.com/godbus/dbus/v5/homedir_dynamic.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !static_build - -package dbus - -import ( - "os/user" -) - -func lookupHomeDir() string { - u, err := user.Current() - if err != nil { - return "/" - } - return u.HomeDir -} diff --git a/agent/vendor/github.com/godbus/dbus/v5/homedir_static.go b/agent/vendor/github.com/godbus/dbus/v5/homedir_static.go deleted file mode 100644 index b9d9cb5525a..00000000000 --- a/agent/vendor/github.com/godbus/dbus/v5/homedir_static.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build static_build - -package dbus - -import ( - "bufio" - "os" - "strconv" - "strings" -) - -func lookupHomeDir() string { - myUid := os.Getuid() - - f, err := os.Open("/etc/passwd") - if err != nil { - return "/" - } - defer f.Close() - - s := bufio.NewScanner(f) - - for s.Scan() { - if err := s.Err(); err != nil { - break - } - - line := strings.TrimSpace(s.Text()) - if line == "" { - continue - } - - parts := strings.Split(line, ":") - - if len(parts) >= 6 { - uid, err := strconv.Atoi(parts[2]) - if err == nil && uid == myUid { - return parts[5] - } - } - } - - // Default to / if we can't get a better value - return "/" -} diff --git a/agent/vendor/github.com/godbus/dbus/v5/message.go b/agent/vendor/github.com/godbus/dbus/v5/message.go index 16693eb3017..bdf43fdd6e5 100644 --- a/agent/vendor/github.com/godbus/dbus/v5/message.go +++ b/agent/vendor/github.com/godbus/dbus/v5/message.go @@ -208,7 +208,7 @@ func DecodeMessageWithFDs(rd io.Reader, fds []int) (msg *Message, err error) { // The possibly returned error can be an error of the underlying reader, an // InvalidMessageError or a FormatError. func DecodeMessage(rd io.Reader) (msg *Message, err error) { - return DecodeMessageWithFDs(rd, make([]int, 0)); + return DecodeMessageWithFDs(rd, make([]int, 0)) } type nullwriter struct{} @@ -227,8 +227,8 @@ func (msg *Message) CountFds() (int, error) { } func (msg *Message) EncodeToWithFDs(out io.Writer, order binary.ByteOrder) (fds []int, err error) { - if err := msg.IsValid(); err != nil { - return make([]int, 0), err + if err := msg.validateHeader(); err != nil { + return nil, err } var vs [7]interface{} switch order { @@ -237,7 +237,7 @@ func (msg *Message) EncodeToWithFDs(out io.Writer, order binary.ByteOrder) (fds case binary.BigEndian: vs[0] = byte('B') default: - return make([]int, 0), errors.New("dbus: invalid byte order") + return nil, errors.New("dbus: invalid byte order") } body := new(bytes.Buffer) fds = make([]int, 0) @@ -284,8 +284,13 @@ func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) (err error) } // IsValid checks whether msg is a valid message and returns an -// InvalidMessageError if it is not. +// InvalidMessageError or FormatError if it is not. 
func (msg *Message) IsValid() error { + var b bytes.Buffer + return msg.EncodeTo(&b, nativeEndian) +} + +func (msg *Message) validateHeader() error { if msg.Flags & ^(FlagNoAutoStart|FlagNoReplyExpected|FlagAllowInteractiveAuthorization) != 0 { return InvalidMessageError("invalid flags") } @@ -330,6 +335,7 @@ func (msg *Message) IsValid() error { return InvalidMessageError("missing signature") } } + return nil } diff --git a/agent/vendor/github.com/godbus/dbus/v5/server_interfaces.go b/agent/vendor/github.com/godbus/dbus/v5/server_interfaces.go index 79d97edf3ec..e4e0389fdf0 100644 --- a/agent/vendor/github.com/godbus/dbus/v5/server_interfaces.go +++ b/agent/vendor/github.com/godbus/dbus/v5/server_interfaces.go @@ -63,7 +63,7 @@ type Method interface { // any other decoding scheme. type ArgumentDecoder interface { // To decode the arguments of a method the sender and message are - // provided incase the semantics of the implementer provides access + // provided in case the semantics of the implementer provides access // to these as part of the method invocation. DecodeArguments(conn *Conn, sender string, msg *Message, args []interface{}) ([]interface{}, error) } diff --git a/agent/vendor/github.com/godbus/dbus/v5/sig.go b/agent/vendor/github.com/godbus/dbus/v5/sig.go index 41a0398129d..6b9cadb5fbc 100644 --- a/agent/vendor/github.com/godbus/dbus/v5/sig.go +++ b/agent/vendor/github.com/godbus/dbus/v5/sig.go @@ -102,7 +102,7 @@ func getSignature(t reflect.Type, depth *depthCounter) (sig string) { } } if len(s) == 0 { - panic("empty struct") + panic(InvalidTypeError{t}) } return "(" + s + ")" case reflect.Array, reflect.Slice: diff --git a/agent/vendor/github.com/godbus/dbus/v5/transport_unix.go b/agent/vendor/github.com/godbus/dbus/v5/transport_unix.go index 2212e7fa7f2..0a8c712ebdf 100644 --- a/agent/vendor/github.com/godbus/dbus/v5/transport_unix.go +++ b/agent/vendor/github.com/godbus/dbus/v5/transport_unix.go @@ -154,17 +154,15 @@ func (t *unixTransport) ReadMessage() (*Message, error) { // substitute the values in the message body (which are indices for the // array receiver via OOB) with the actual values for i, v := range msg.Body { - switch v.(type) { + switch index := v.(type) { case UnixFDIndex: - j := v.(UnixFDIndex) - if uint32(j) >= unixfds { + if uint32(index) >= unixfds { return nil, InvalidMessageError("invalid index for unix fd") } - msg.Body[i] = UnixFD(fds[j]) + msg.Body[i] = UnixFD(fds[index]) case []UnixFDIndex: - idxArray := v.([]UnixFDIndex) - fdArray := make([]UnixFD, len(idxArray)) - for k, j := range idxArray { + fdArray := make([]UnixFD, len(index)) + for k, j := range index { if uint32(j) >= unixfds { return nil, InvalidMessageError("invalid index for unix fd") } diff --git a/agent/vendor/github.com/godbus/dbus/v5/transport_zos.go b/agent/vendor/github.com/godbus/dbus/v5/transport_zos.go new file mode 100644 index 00000000000..1bba0d6bf78 --- /dev/null +++ b/agent/vendor/github.com/godbus/dbus/v5/transport_zos.go @@ -0,0 +1,6 @@ +package dbus + +func (t *unixTransport) SendNullByte() error { + _, err := t.Write([]byte{0}) + return err +} diff --git a/agent/vendor/github.com/godbus/dbus/v5/variant.go b/agent/vendor/github.com/godbus/dbus/v5/variant.go index f1e81f3ede6..ca3dbe16a44 100644 --- a/agent/vendor/github.com/godbus/dbus/v5/variant.go +++ b/agent/vendor/github.com/godbus/dbus/v5/variant.go @@ -49,7 +49,7 @@ func ParseVariant(s string, sig Signature) (Variant, error) { } // format returns a formatted version of v and whether this string can be parsed -// 
unambigously. +// unambiguously. func (v Variant) format() (string, bool) { switch v.sig.str[0] { case 'b', 'i': diff --git a/agent/vendor/github.com/golang/mock/gomock/call.go b/agent/vendor/github.com/golang/mock/gomock/call.go index 7345f6540f6..13c9f44b1ef 100644 --- a/agent/vendor/github.com/golang/mock/gomock/call.go +++ b/agent/vendor/github.com/golang/mock/gomock/call.go @@ -50,19 +50,22 @@ func newCall(t TestHelper, receiver interface{}, method string, methodType refle t.Helper() // TODO: check arity, types. - margs := make([]Matcher, len(args)) + mArgs := make([]Matcher, len(args)) for i, arg := range args { if m, ok := arg.(Matcher); ok { - margs[i] = m + mArgs[i] = m } else if arg == nil { // Handle nil specially so that passing a nil interface value // will match the typed nils of concrete args. - margs[i] = Nil() + mArgs[i] = Nil() } else { - margs[i] = Eq(arg) + mArgs[i] = Eq(arg) } } + // callerInfo's skip should be updated if the number of calls between the user's test + // and this line changes, i.e. this code is wrapped in another anonymous function. + // 0 is us, 1 is RecordCallWithMethodType(), 2 is the generated recorder, and 3 is the user's test. origin := callerInfo(3) actions := []func([]interface{}) []interface{}{func([]interface{}) []interface{} { // Synthesize the zero value for each of the return args' types. @@ -73,7 +76,7 @@ func newCall(t TestHelper, receiver interface{}, method string, methodType refle return rets }} return &Call{t: t, receiver: receiver, method: method, methodType: methodType, - args: margs, origin: origin, minCalls: 1, maxCalls: 1, actions: actions} + args: mArgs, origin: origin, minCalls: 1, maxCalls: 1, actions: actions} } // AnyTimes allows the expectation to be called 0 or more times @@ -110,19 +113,25 @@ func (c *Call) DoAndReturn(f interface{}) *Call { v := reflect.ValueOf(f) c.addAction(func(args []interface{}) []interface{} { - vargs := make([]reflect.Value, len(args)) + c.t.Helper() + vArgs := make([]reflect.Value, len(args)) ft := v.Type() + if c.methodType.NumIn() != ft.NumIn() { + c.t.Fatalf("wrong number of arguments in DoAndReturn func for %T.%v: got %d, want %d [%s]", + c.receiver, c.method, ft.NumIn(), c.methodType.NumIn(), c.origin) + return nil + } for i := 0; i < len(args); i++ { if args[i] != nil { - vargs[i] = reflect.ValueOf(args[i]) + vArgs[i] = reflect.ValueOf(args[i]) } else { // Use the zero value for the arg. - vargs[i] = reflect.Zero(ft.In(i)) + vArgs[i] = reflect.Zero(ft.In(i)) } } - vrets := v.Call(vargs) - rets := make([]interface{}, len(vrets)) - for i, ret := range vrets { + vRets := v.Call(vArgs) + rets := make([]interface{}, len(vRets)) + for i, ret := range vRets { rets[i] = ret.Interface() } return rets @@ -139,17 +148,23 @@ func (c *Call) Do(f interface{}) *Call { v := reflect.ValueOf(f) c.addAction(func(args []interface{}) []interface{} { - vargs := make([]reflect.Value, len(args)) + c.t.Helper() + if c.methodType.NumIn() != v.Type().NumIn() { + c.t.Fatalf("wrong number of arguments in Do func for %T.%v: got %d, want %d [%s]", + c.receiver, c.method, v.Type().NumIn(), c.methodType.NumIn(), c.origin) + return nil + } + vArgs := make([]reflect.Value, len(args)) ft := v.Type() for i := 0; i < len(args); i++ { if args[i] != nil { - vargs[i] = reflect.ValueOf(args[i]) + vArgs[i] = reflect.ValueOf(args[i]) } else { // Use the zero value for the arg. 
- vargs[i] = reflect.Zero(ft.In(i)) + vArgs[i] = reflect.Zero(ft.In(i)) } } - v.Call(vargs) + v.Call(vArgs) return nil }) return c @@ -301,14 +316,9 @@ func (c *Call) matches(args []interface{}) error { for i, m := range c.args { if !m.Matches(args[i]) { - got := fmt.Sprintf("%v", args[i]) - if gs, ok := m.(GotFormatter); ok { - got = gs.Got(args[i]) - } - return fmt.Errorf( "expected call at %s doesn't match the argument at index %d.\nGot: %v\nWant: %v", - c.origin, i, got, m, + c.origin, i, formatGottenArg(m, args[i]), m, ) } } @@ -331,7 +341,7 @@ func (c *Call) matches(args []interface{}) error { // Non-variadic args if !m.Matches(args[i]) { return fmt.Errorf("expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v", - c.origin, strconv.Itoa(i), args[i], m) + c.origin, strconv.Itoa(i), formatGottenArg(m, args[i]), m) } continue } @@ -355,12 +365,12 @@ func (c *Call) matches(args []interface{}) error { // matches all the remaining arguments or the lack of any. // Convert the remaining arguments, if any, into a slice of the // expected type. - vargsType := c.methodType.In(c.methodType.NumIn() - 1) - vargs := reflect.MakeSlice(vargsType, 0, len(args)-i) + vArgsType := c.methodType.In(c.methodType.NumIn() - 1) + vArgs := reflect.MakeSlice(vArgsType, 0, len(args)-i) for _, arg := range args[i:] { - vargs = reflect.Append(vargs, reflect.ValueOf(arg)) + vArgs = reflect.Append(vArgs, reflect.ValueOf(arg)) } - if m.Matches(vargs.Interface()) { + if m.Matches(vArgs.Interface()) { // Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, gomock.Any()) // Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, someSliceMatcher) // Got Foo(a, b) want Foo(matcherA, matcherB, gomock.Any()) @@ -373,16 +383,16 @@ func (c *Call) matches(args []interface{}) error { // Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD, matcherE) // Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, matcherC, matcherD) // Got Foo(a, b, c) want Foo(matcherA, matcherB) - return fmt.Errorf("Expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v", - c.origin, strconv.Itoa(i), args[i:], c.args[i]) + return fmt.Errorf("expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v", + c.origin, strconv.Itoa(i), formatGottenArg(m, args[i:]), c.args[i]) } } // Check that all prerequisite calls have been satisfied. 
for _, preReqCall := range c.preReqs { if !preReqCall.satisfied() { - return fmt.Errorf("Expected call at %s doesn't have a prerequisite call satisfied:\n%v\nshould be called before:\n%v", + return fmt.Errorf("expected call at %s doesn't have a prerequisite call satisfied:\n%v\nshould be called before:\n%v", c.origin, preReqCall, c) } } @@ -425,3 +435,11 @@ func setSlice(arg interface{}, v reflect.Value) { func (c *Call) addAction(action func([]interface{}) []interface{}) { c.actions = append(c.actions, action) } + +func formatGottenArg(m Matcher, arg interface{}) string { + got := fmt.Sprintf("%v (%T)", arg, arg) + if gs, ok := m.(GotFormatter); ok { + got = gs.Got(arg) + } + return got +} diff --git a/agent/vendor/github.com/golang/mock/gomock/callset.go b/agent/vendor/github.com/golang/mock/gomock/callset.go index b046b525e96..49dba787a40 100644 --- a/agent/vendor/github.com/golang/mock/gomock/callset.go +++ b/agent/vendor/github.com/golang/mock/gomock/callset.go @@ -16,6 +16,7 @@ package gomock import ( "bytes" + "errors" "fmt" ) @@ -84,14 +85,18 @@ func (cs callSet) FindMatch(receiver interface{}, method string, args []interfac for _, call := range exhausted { if err := call.matches(args); err != nil { _, _ = fmt.Fprintf(&callsErrors, "\n%v", err) + continue } + _, _ = fmt.Fprintf( + &callsErrors, "all expected calls for method %q have been exhausted", method, + ) } if len(expected)+len(exhausted) == 0 { _, _ = fmt.Fprintf(&callsErrors, "there are no expected calls of the method %q for that receiver", method) } - return nil, fmt.Errorf(callsErrors.String()) + return nil, errors.New(callsErrors.String()) } // Failures returns the calls that are not satisfied. diff --git a/agent/vendor/github.com/golang/mock/gomock/controller.go b/agent/vendor/github.com/golang/mock/gomock/controller.go index d7c3c656ae0..f054200d56c 100644 --- a/agent/vendor/github.com/golang/mock/gomock/controller.go +++ b/agent/vendor/github.com/golang/mock/gomock/controller.go @@ -50,9 +50,6 @@ // mockObj.EXPECT().SomeMethod(2, "second"), // mockObj.EXPECT().SomeMethod(3, "third"), // ) -// -// TODO: -// - Handle different argument/return types (e.g. ..., chan, map, interface). package gomock import ( @@ -77,6 +74,15 @@ type TestHelper interface { Helper() } +// cleanuper is used to check if TestHelper also has the `Cleanup` method. A +// common pattern is to pass in a `*testing.T` to +// `NewController(t TestReporter)`. In Go 1.14+, `*testing.T` has a cleanup +// method. This can be utilized to call `Finish()` so the caller of this library +// does not have to. +type cleanuper interface { + Cleanup(func()) +} + // A Controller represents the top-level control of a mock ecosystem. It // defines the scope and lifetime of mock objects, as well as their // expectations. It is safe to call Controller's methods from multiple @@ -115,29 +121,43 @@ type Controller struct { // NewController returns a new Controller. It is the preferred way to create a // Controller. +// +// New in go1.14+, if you are passing a *testing.T into this function you no +// longer need to call ctrl.Finish() in your test methods. 
func NewController(t TestReporter) *Controller { h, ok := t.(TestHelper) if !ok { - h = nopTestHelper{t} + h = &nopTestHelper{t} } - - return &Controller{ + ctrl := &Controller{ T: h, expectedCalls: newCallSet(), } + if c, ok := isCleanuper(ctrl.T); ok { + c.Cleanup(func() { + ctrl.T.Helper() + ctrl.finish(true, nil) + }) + } + + return ctrl } type cancelReporter struct { - TestHelper + t TestHelper cancel func() } func (r *cancelReporter) Errorf(format string, args ...interface{}) { - r.TestHelper.Errorf(format, args...) + r.t.Errorf(format, args...) } func (r *cancelReporter) Fatalf(format string, args ...interface{}) { defer r.cancel() - r.TestHelper.Fatalf(format, args...) + r.t.Fatalf(format, args...) +} + +func (r *cancelReporter) Helper() { + r.t.Helper() } // WithContext returns a new Controller and a Context, which is cancelled on any @@ -145,15 +165,22 @@ func (r *cancelReporter) Fatalf(format string, args ...interface{}) { func WithContext(ctx context.Context, t TestReporter) (*Controller, context.Context) { h, ok := t.(TestHelper) if !ok { - h = nopTestHelper{t} + h = &nopTestHelper{t: t} } ctx, cancel := context.WithCancel(ctx) - return NewController(&cancelReporter{h, cancel}), ctx + return NewController(&cancelReporter{t: h, cancel: cancel}), ctx } type nopTestHelper struct { - TestReporter + t TestReporter +} + +func (h *nopTestHelper) Errorf(format string, args ...interface{}) { + h.t.Errorf(format, args...) +} +func (h *nopTestHelper) Fatalf(format string, args ...interface{}) { + h.t.Fatalf(format, args...) } func (h nopTestHelper) Helper() {} @@ -197,7 +224,10 @@ func (ctrl *Controller) Call(receiver interface{}, method string, args ...interf expected, err := ctrl.expectedCalls.FindMatch(receiver, method, args) if err != nil { - origin := callerInfo(2) + // callerInfo's skip should be updated if the number of calls between the user's test + // and this line changes, i.e. this code is wrapped in another anonymous function. + // 0 is us, 1 is controller.Call(), 2 is the generated mock, and 3 is the user's test. + origin := callerInfo(3) ctrl.T.Fatalf("Unexpected call to %T.%v(%v) at %s because: %s", receiver, method, args, origin, err) } @@ -229,21 +259,33 @@ func (ctrl *Controller) Call(receiver interface{}, method string, args ...interf // Finish checks to see if all the methods that were expected to be called // were called. It should be invoked for each Controller. It is not idempotent // and therefore can only be invoked once. +// +// New in go1.14+, if you are passing a *testing.T into NewController function you no +// longer need to call ctrl.Finish() in your test methods. func (ctrl *Controller) Finish() { + // If we're currently panicking, probably because this is a deferred call. + // This must be recovered in the deferred function. + err := recover() + ctrl.finish(false, err) +} + +func (ctrl *Controller) finish(cleanup bool, panicErr interface{}) { ctrl.T.Helper() ctrl.mu.Lock() defer ctrl.mu.Unlock() if ctrl.finished { - ctrl.T.Fatalf("Controller.Finish was called more than once. It has to be called exactly once.") + if _, ok := isCleanuper(ctrl.T); !ok { + ctrl.T.Fatalf("Controller.Finish was called more than once. It has to be called exactly once.") + } + return } ctrl.finished = true - // If we're currently panicking, probably because this is a deferred call, - // pass through the panic. - if err := recover(); err != nil { - panic(err) + // Short-circuit, pass through the panic. 
+ if panicErr != nil { + panic(panicErr) } // Check that all remaining expected calls are satisfied. @@ -252,13 +294,43 @@ func (ctrl *Controller) Finish() { ctrl.T.Errorf("missing call(s) to %v", call) } if len(failures) != 0 { - ctrl.T.Fatalf("aborting test due to missing call(s)") + if !cleanup { + ctrl.T.Fatalf("aborting test due to missing call(s)") + return + } + ctrl.T.Errorf("aborting test due to missing call(s)") } } +// callerInfo returns the file:line of the call site. skip is the number +// of stack frames to skip when reporting. 0 is callerInfo's call site. func callerInfo(skip int) string { if _, file, line, ok := runtime.Caller(skip + 1); ok { return fmt.Sprintf("%s:%d", file, line) } return "unknown file" } + +// isCleanuper checks it if t's base TestReporter has a Cleanup method. +func isCleanuper(t TestReporter) (cleanuper, bool) { + tr := unwrapTestReporter(t) + c, ok := tr.(cleanuper) + return c, ok +} + +// unwrapTestReporter unwraps TestReporter to the base implementation. +func unwrapTestReporter(t TestReporter) TestReporter { + tr := t + switch nt := t.(type) { + case *cancelReporter: + tr = nt.t + if h, check := tr.(*nopTestHelper); check { + tr = h.t + } + case *nopTestHelper: + tr = nt.t + default: + // not wrapped + } + return tr +} diff --git a/agent/vendor/github.com/golang/mock/gomock/matchers.go b/agent/vendor/github.com/golang/mock/gomock/matchers.go index 7bfc07be43f..2822fb2c8c4 100644 --- a/agent/vendor/github.com/golang/mock/gomock/matchers.go +++ b/agent/vendor/github.com/golang/mock/gomock/matchers.go @@ -102,11 +102,25 @@ type eqMatcher struct { } func (e eqMatcher) Matches(x interface{}) bool { - return reflect.DeepEqual(e.x, x) + // In case, some value is nil + if e.x == nil || x == nil { + return reflect.DeepEqual(e.x, x) + } + + // Check if types assignable and convert them to common type + x1Val := reflect.ValueOf(e.x) + x2Val := reflect.ValueOf(x) + + if x1Val.Type().AssignableTo(x2Val.Type()) { + x1ValConverted := x1Val.Convert(x2Val.Type()) + return reflect.DeepEqual(x1ValConverted.Interface(), x2Val.Interface()) + } + + return false } func (e eqMatcher) String() string { - return fmt.Sprintf("is equal to %v", e.x) + return fmt.Sprintf("is equal to %v (%T)", e.x, e.x) } type nilMatcher struct{} @@ -139,7 +153,6 @@ func (n notMatcher) Matches(x interface{}) bool { } func (n notMatcher) String() string { - // TODO: Improve this if we add a NotString method to the Matcher interface. 
return "not(" + n.m.String() + ")" } @@ -194,6 +207,70 @@ func (m lenMatcher) String() string { return fmt.Sprintf("has length %d", m.i) } +type inAnyOrderMatcher struct { + x interface{} +} + +func (m inAnyOrderMatcher) Matches(x interface{}) bool { + given, ok := m.prepareValue(x) + if !ok { + return false + } + wanted, ok := m.prepareValue(m.x) + if !ok { + return false + } + + if given.Len() != wanted.Len() { + return false + } + + usedFromGiven := make([]bool, given.Len()) + foundFromWanted := make([]bool, wanted.Len()) + for i := 0; i < wanted.Len(); i++ { + wantedMatcher := Eq(wanted.Index(i).Interface()) + for j := 0; j < given.Len(); j++ { + if usedFromGiven[j] { + continue + } + if wantedMatcher.Matches(given.Index(j).Interface()) { + foundFromWanted[i] = true + usedFromGiven[j] = true + break + } + } + } + + missingFromWanted := 0 + for _, found := range foundFromWanted { + if !found { + missingFromWanted++ + } + } + extraInGiven := 0 + for _, used := range usedFromGiven { + if !used { + extraInGiven++ + } + } + + return extraInGiven == 0 && missingFromWanted == 0 +} + +func (m inAnyOrderMatcher) prepareValue(x interface{}) (reflect.Value, bool) { + xValue := reflect.ValueOf(x) + switch xValue.Kind() { + case reflect.Slice, reflect.Array: + return xValue, true + default: + return reflect.Value{}, false + } +} + +func (m inAnyOrderMatcher) String() string { + return fmt.Sprintf("has the same elements as %v", m.x) +} + // Constructors // All returns a composite Matcher that returns true if and only all of the @@ -245,7 +322,7 @@ func Not(x interface{}) Matcher { // AssignableToTypeOf(s).Matches(time.Second) // returns true // AssignableToTypeOf(s).Matches(99) // returns false // -// var ctx = reflect.TypeOf((*context.Context)).Elem() +// var ctx = reflect.TypeOf((*context.Context)(nil)).Elem() // AssignableToTypeOf(ctx).Matches(context.Background()) // returns true func AssignableToTypeOf(x interface{}) Matcher { if xt, ok := x.(reflect.Type); ok { @@ -253,3 +330,12 @@ func AssignableToTypeOf(x interface{}) Matcher { } return assignableToTypeOfMatcher{reflect.TypeOf(x)} } + +// InAnyOrder is a Matcher that returns true for collections of the same elements ignoring the order. +// +// Example usage: +// InAnyOrder([]int{1, 2, 3}).Matches([]int{1, 3, 2}) // returns true +// InAnyOrder([]int{1, 2, 3}).Matches([]int{1, 2}) // returns false +func InAnyOrder(x interface{}) Matcher { + return inAnyOrderMatcher{x} +} diff --git a/agent/vendor/github.com/golang/mock/mockgen/model/model.go b/agent/vendor/github.com/golang/mock/mockgen/model/model.go index 88783cfdf6e..2c6a62ceb26 100644 --- a/agent/vendor/github.com/golang/mock/mockgen/model/model.go +++ b/agent/vendor/github.com/golang/mock/mockgen/model/model.go @@ -71,6 +71,16 @@ func (intf *Interface) addImports(im map[string]bool) { } } +// AddMethod adds a new method, de-duplicating by method name. +func (intf *Interface) AddMethod(m *Method) { + for _, me := range intf.Methods { + if me.Name == m.Name { + return + } + } + intf.Methods = append(intf.Methods, m) +} + // Method is a single method of an interface. type Method struct { Name string @@ -250,11 +260,10 @@ func (mt *MapType) addImports(im map[string]bool) { // NamedType is an exported type in a package. type NamedType struct { Package string // may be empty - Type string // TODO: should this be typed Type? + Type string } func (nt *NamedType) String(pm map[string]string, pkgOverride string) string { - // TODO: is this right? 
if pkgOverride == nt.Package { return nt.Type } @@ -311,7 +320,7 @@ func InterfaceFromInterfaceType(it reflect.Type) (*Interface, error) { return nil, err } - intf.Methods = append(intf.Methods, m) + intf.AddMethod(m) } return intf, nil @@ -468,3 +477,19 @@ func impPath(imp string) string { } return imp } + +// ErrorInterface represent built-in error interface. +var ErrorInterface = Interface{ + Name: "error", + Methods: []*Method{ + { + Name: "Error", + Out: []*Parameter{ + { + Name: "", + Type: PredeclaredType("string"), + }, + }, + }, + }, +} diff --git a/agent/vendor/github.com/sirupsen/logrus/README.md b/agent/vendor/github.com/sirupsen/logrus/README.md index 5152b6aa406..b042c896f25 100644 --- a/agent/vendor/github.com/sirupsen/logrus/README.md +++ b/agent/vendor/github.com/sirupsen/logrus/README.md @@ -1,4 +1,4 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) +# Logrus :walrus: [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus) Logrus is a structured logger for Go (golang), completely API compatible with the standard library logger. @@ -341,7 +341,7 @@ import ( log "github.com/sirupsen/logrus" ) -init() { +func init() { // do something here to set environment depending on an environment variable // or command-line flag if Environment == "production" { diff --git a/agent/vendor/github.com/sirupsen/logrus/buffer_pool.go b/agent/vendor/github.com/sirupsen/logrus/buffer_pool.go index 4545dec07d8..c7787f77cbf 100644 --- a/agent/vendor/github.com/sirupsen/logrus/buffer_pool.go +++ b/agent/vendor/github.com/sirupsen/logrus/buffer_pool.go @@ -26,15 +26,6 @@ func (p *defaultPool) Get() *bytes.Buffer { return p.pool.Get().(*bytes.Buffer) } -func getBuffer() *bytes.Buffer { - return bufferPool.Get() -} - -func putBuffer(buf *bytes.Buffer) { - buf.Reset() - bufferPool.Put(buf) -} - // SetBufferPool allows to replace the default logrus buffer pool // to better meets the specific needs of an application. 
func SetBufferPool(bp BufferPool) { diff --git a/agent/vendor/github.com/sirupsen/logrus/entry.go b/agent/vendor/github.com/sirupsen/logrus/entry.go index 07a1e5fa724..71cdbbc35d2 100644 --- a/agent/vendor/github.com/sirupsen/logrus/entry.go +++ b/agent/vendor/github.com/sirupsen/logrus/entry.go @@ -232,6 +232,7 @@ func (entry *Entry) log(level Level, msg string) { newEntry.Logger.mu.Lock() reportCaller := newEntry.Logger.ReportCaller + bufPool := newEntry.getBufferPool() newEntry.Logger.mu.Unlock() if reportCaller { @@ -239,11 +240,11 @@ func (entry *Entry) log(level Level, msg string) { } newEntry.fireHooks() - - buffer = getBuffer() + buffer = bufPool.Get() defer func() { newEntry.Buffer = nil - putBuffer(buffer) + buffer.Reset() + bufPool.Put(buffer) }() buffer.Reset() newEntry.Buffer = buffer @@ -260,6 +261,13 @@ func (entry *Entry) log(level Level, msg string) { } } +func (entry *Entry) getBufferPool() (pool BufferPool) { + if entry.Logger.BufferPool != nil { + return entry.Logger.BufferPool + } + return bufferPool +} + func (entry *Entry) fireHooks() { var tmpHooks LevelHooks entry.Logger.mu.Lock() @@ -276,18 +284,21 @@ func (entry *Entry) fireHooks() { } func (entry *Entry) write() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() serialized, err := entry.Logger.Formatter.Format(entry) if err != nil { fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) return } - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() if _, err := entry.Logger.Out.Write(serialized); err != nil { fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) } } +// Log will log a message at the level given as parameter. +// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. +// For this behaviour Entry.Panic or Entry.Fatal should be used instead. func (entry *Entry) Log(level Level, args ...interface{}) { if entry.Logger.IsLevelEnabled(level) { entry.log(level, fmt.Sprint(args...)) diff --git a/agent/vendor/github.com/sirupsen/logrus/logger.go b/agent/vendor/github.com/sirupsen/logrus/logger.go index 337704457a2..5ff0aef6d3f 100644 --- a/agent/vendor/github.com/sirupsen/logrus/logger.go +++ b/agent/vendor/github.com/sirupsen/logrus/logger.go @@ -44,6 +44,9 @@ type Logger struct { entryPool sync.Pool // Function to exit the application, defaults to `os.Exit()` ExitFunc exitFunc + // The buffer pool used to format the log. If it is nil, the default global + // buffer pool will be used. + BufferPool BufferPool } type exitFunc func(int) @@ -192,6 +195,9 @@ func (logger *Logger) Panicf(format string, args ...interface{}) { logger.Logf(PanicLevel, format, args...) } +// Log will log a message at the level given as parameter. +// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. +// For this behaviour Logger.Panic or Logger.Fatal should be used instead. func (logger *Logger) Log(level Level, args ...interface{}) { if logger.IsLevelEnabled(level) { entry := logger.newEntry() @@ -402,3 +408,10 @@ func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { logger.mu.Unlock() return oldHooks } + +// SetBufferPool sets the logger buffer pool. 
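The hunks above and below replace the package-level getBuffer/putBuffer helpers with a per-logger BufferPool. A hedged sketch of wiring in a custom pool through the new Logger.SetBufferPool, assuming the vendored logrus API shown here:

package main

import (
	"bytes"
	"sync"

	"github.com/sirupsen/logrus"
)

// bufPool implements the logrus BufferPool interface on top of sync.Pool.
type bufPool struct{ p sync.Pool }

func (b *bufPool) Get() *bytes.Buffer    { return b.p.Get().(*bytes.Buffer) }
func (b *bufPool) Put(buf *bytes.Buffer) { b.p.Put(buf) }

func main() {
	logger := logrus.New()
	logger.SetBufferPool(&bufPool{p: sync.Pool{
		New: func() interface{} { return new(bytes.Buffer) },
	}})
	logger.Info("formatted through the custom buffer pool")
}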
+func (logger *Logger) SetBufferPool(pool BufferPool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.BufferPool = pool +} diff --git a/agent/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/agent/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 41649d26792..95d8e59da69 100644 --- a/agent/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/agent/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -1,8 +1,10 @@ package assert import ( + "bytes" "fmt" "reflect" + "time" ) type CompareType int @@ -30,6 +32,9 @@ var ( float64Type = reflect.TypeOf(float64(1)) stringType = reflect.TypeOf("") + + timeType = reflect.TypeOf(time.Time{}) + bytesType = reflect.TypeOf([]byte{}) ) func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { @@ -299,6 +304,47 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { return compareLess, true } } + // Check for known struct types we can check for compare results. + case reflect.Struct: + { + // All structs enter here. We're not interested in most types. + if !canConvert(obj1Value, timeType) { + break + } + + // time.Time can compared! + timeObj1, ok := obj1.(time.Time) + if !ok { + timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time) + } + + timeObj2, ok := obj2.(time.Time) + if !ok { + timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time) + } + + return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64) + } + case reflect.Slice: + { + // We only care about the []byte type. + if !canConvert(obj1Value, bytesType) { + break + } + + // []byte can be compared! + bytesObj1, ok := obj1.([]byte) + if !ok { + bytesObj1 = obj1Value.Convert(bytesType).Interface().([]byte) + + } + bytesObj2, ok := obj2.([]byte) + if !ok { + bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte) + } + + return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true + } } return compareEqual, false @@ -310,7 +356,10 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { // assert.Greater(t, float64(2), float64(1)) // assert.Greater(t, "b", "a") func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -320,7 +369,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface // assert.GreaterOrEqual(t, "b", "a") // assert.GreaterOrEqual(t, "b", "b") func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) 
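The compare changes above add time.Time (via UnixNano) and []byte (via bytes.Compare) support to the ordered assertions. A hedged test sketch, assuming the vendored testify and a Go 1.17+ toolchain (the canConvert shim below returns false on older releases, so these cases fail there):

package demo

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestOrderedComparisons(t *testing.T) {
	now := time.Now()
	assert.Greater(t, now.Add(time.Second), now) // time.Time values are compared by UnixNano
	assert.Less(t, []byte("abc"), []byte("abd")) // []byte values are compared with bytes.Compare
	assert.GreaterOrEqual(t, 2, 2)               // numeric behaviour is unchanged
}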
} // Less asserts that the first element is less than the second @@ -329,7 +381,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in // assert.Less(t, float64(1), float64(2)) // assert.Less(t, "a", "b") func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // LessOrEqual asserts that the first element is less than or equal to the second @@ -339,7 +394,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) // assert.LessOrEqual(t, "a", "b") // assert.LessOrEqual(t, "b", "b") func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } // Positive asserts that the specified element is positive @@ -347,8 +405,11 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter // assert.Positive(t, 1) // assert.Positive(t, 1.23) func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...) } // Negative asserts that the specified element is negative @@ -356,8 +417,11 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { // assert.Negative(t, -1) // assert.Negative(t, -1.23) func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...) } func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { diff --git a/agent/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/agent/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go new file mode 100644 index 00000000000..da867903e2f --- /dev/null +++ b/agent/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go @@ -0,0 +1,16 @@ +//go:build go1.17 +// +build go1.17 + +// TODO: once support for Go 1.16 is dropped, this file can be +// merged/removed with assertion_compare_go1.17_test.go and +// assertion_compare_legacy.go + +package assert + +import "reflect" + +// Wrapper around reflect.Value.CanConvert, for compatibility +// reasons. 
+func canConvert(value reflect.Value, to reflect.Type) bool { + return value.CanConvert(to) +} diff --git a/agent/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/agent/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go new file mode 100644 index 00000000000..1701af2a3c8 --- /dev/null +++ b/agent/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go @@ -0,0 +1,16 @@ +//go:build !go1.17 +// +build !go1.17 + +// TODO: once support for Go 1.16 is dropped, this file can be +// merged/removed with assertion_compare_go1.17_test.go and +// assertion_compare_can_convert.go + +package assert + +import "reflect" + +// Older versions of Go does not have the reflect.Value.CanConvert +// method. +func canConvert(value reflect.Value, to reflect.Type) bool { + return false +} diff --git a/agent/vendor/github.com/stretchr/testify/assert/assertion_format.go b/agent/vendor/github.com/stretchr/testify/assert/assertion_format.go index 4dfd1229a86..7880b8f9433 100644 --- a/agent/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/agent/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -123,6 +123,18 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...) } +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return ErrorContains(t, theError, contains, append([]interface{}{msg}, args...)...) +} + // ErrorIsf asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { @@ -724,6 +736,16 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...) } +// WithinRangef asserts that a time is within a time range (inclusive). +// +// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return WithinRange(t, actual, start, end, append([]interface{}{msg}, args...)...) +} + // YAMLEqf asserts that two YAML strings are equivalent. func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { diff --git a/agent/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/agent/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 25337a6f07e..339515b8bfb 100644 --- a/agent/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/agent/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -222,6 +222,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. return ErrorAsf(a.t, err, target, msg, args...) } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. 
+// +// actualObj, err := SomeFunction() +// a.ErrorContains(err, expectedErrorSubString) +func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorContains(a.t, theError, contains, msgAndArgs...) +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") +func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorContainsf(a.t, theError, contains, msg, args...) +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool { @@ -1437,6 +1461,26 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta return WithinDurationf(a.t, expected, actual, delta, msg, args...) } +// WithinRange asserts that a time is within a time range (inclusive). +// +// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return WithinRange(a.t, actual, start, end, msgAndArgs...) +} + +// WithinRangef asserts that a time is within a time range (inclusive). +// +// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return WithinRangef(a.t, actual, start, end, msg, args...) +} + // YAMLEq asserts that two YAML strings are equivalent. func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { diff --git a/agent/vendor/github.com/stretchr/testify/assert/assertion_order.go b/agent/vendor/github.com/stretchr/testify/assert/assertion_order.go index 1c3b47182a7..75944878358 100644 --- a/agent/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/agent/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT // assert.IsIncreasing(t, []float{1, 2}) // assert.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) 
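A brief usage sketch combining the new ErrorContains and WithinRange assertions added above, following the signatures documented in those hunks:

package demo

import (
	"errors"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestNewAssertions(t *testing.T) {
	err := errors.New("dial tcp: connection refused")
	assert.ErrorContains(t, err, "connection refused") // passes: substring is present

	now := time.Now()
	assert.WithinRange(t, now, now.Add(-time.Second), now.Add(time.Second)) // passes: inside the range
}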
} // IsNonIncreasing asserts that the collection is not increasing @@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonIncreasing(t, []float{2, 1}) // assert.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // IsDecreasing asserts that the collection is decreasing @@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // assert.IsDecreasing(t, []float{2, 1}) // assert.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // IsNonDecreasing asserts that the collection is not decreasing @@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonDecreasing(t, []float{1, 2}) // assert.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } diff --git a/agent/vendor/github.com/stretchr/testify/assert/assertions.go b/agent/vendor/github.com/stretchr/testify/assert/assertions.go index bcac4401f57..fa1245b1897 100644 --- a/agent/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/agent/vendor/github.com/stretchr/testify/assert/assertions.go @@ -8,6 +8,7 @@ import ( "fmt" "math" "os" + "path/filepath" "reflect" "regexp" "runtime" @@ -144,7 +145,8 @@ func CallerInfo() []string { if len(parts) > 1 { dir := parts[len(parts)-2] if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + path, _ := filepath.Abs(file) + callers = append(callers, fmt.Sprintf("%s:%d", path, line)) } } @@ -563,16 +565,17 @@ func isEmpty(object interface{}) bool { switch objValue.Kind() { // collection types are empty when they have no element - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + case reflect.Chan, reflect.Map, reflect.Slice: return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty + // pointers are empty if nil or if the value they point to is empty case reflect.Ptr: if objValue.IsNil() { return true } deref := objValue.Elem().Interface() return isEmpty(deref) - // for all other types, compare against the zero value + // for all other types, compare against the zero value + // array types are empty when they match their zero-initialized state default: zero := reflect.Zero(objValue.Type()) return reflect.DeepEqual(object, zero.Interface()) @@ -718,10 +721,14 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte // return (false, false) if impossible. // return (true, false) if element was not found. 
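Below, Subset and NotSubset gain map support: when both arguments are maps, each key of the subset is looked up in the list and the values are compared. A small sketch of the resulting behaviour, assuming the vendored testify:

package demo

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestMapSubset(t *testing.T) {
	full := map[string]int{"a": 1, "b": 2, "c": 3}
	assert.Subset(t, full, map[string]int{"a": 1, "c": 3}) // passes: keys present with equal values
	assert.NotSubset(t, full, map[string]int{"a": 9})      // passes: value for "a" differs
}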
// return (true, true) if element was found. -func includeElement(list interface{}, element interface{}) (ok, found bool) { +func containsElement(list interface{}, element interface{}) (ok, found bool) { listValue := reflect.ValueOf(list) - listKind := reflect.TypeOf(list).Kind() + listType := reflect.TypeOf(list) + if listType == nil { + return false, false + } + listKind := listType.Kind() defer func() { if e := recover(); e != nil { ok = false @@ -764,7 +771,7 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo h.Helper() } - ok, found := includeElement(s, contains) + ok, found := containsElement(s, contains) if !ok { return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...) } @@ -787,7 +794,7 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) h.Helper() } - ok, found := includeElement(s, contains) + ok, found := containsElement(s, contains) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) } @@ -811,7 +818,6 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true // we consider nil to be equal to the nil set } - subsetValue := reflect.ValueOf(subset) defer func() { if e := recover(); e != nil { ok = false @@ -821,17 +827,35 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok listKind := reflect.TypeOf(list).Kind() subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice { + if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } - if subsetKind != reflect.Array && subsetKind != reflect.Slice { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } + subsetValue := reflect.ValueOf(subset) + if subsetKind == reflect.Map && listKind == reflect.Map { + listValue := reflect.ValueOf(list) + subsetKeys := subsetValue.MapKeys() + + for i := 0; i < len(subsetKeys); i++ { + subsetKey := subsetKeys[i] + subsetElement := subsetValue.MapIndex(subsetKey).Interface() + listElement := listValue.MapIndex(subsetKey).Interface() + + if !ObjectsAreEqual(subsetElement, listElement) { + return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...) + } + } + + return true + } + for i := 0; i < subsetValue.Len(); i++ { element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) + ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) } @@ -852,10 +876,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) h.Helper() } if subset == nil { - return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) + return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...) 
} - subsetValue := reflect.ValueOf(subset) defer func() { if e := recover(); e != nil { ok = false @@ -865,17 +888,35 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) listKind := reflect.TypeOf(list).Kind() subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice { + if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } - if subsetKind != reflect.Array && subsetKind != reflect.Slice { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } + subsetValue := reflect.ValueOf(subset) + if subsetKind == reflect.Map && listKind == reflect.Map { + listValue := reflect.ValueOf(list) + subsetKeys := subsetValue.MapKeys() + + for i := 0; i < len(subsetKeys); i++ { + subsetKey := subsetKeys[i] + subsetElement := subsetValue.MapIndex(subsetKey).Interface() + listElement := listValue.MapIndex(subsetKey).Interface() + + if !ObjectsAreEqual(subsetElement, listElement) { + return true + } + } + + return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) + } + for i := 0; i < subsetValue.Len(); i++ { element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) + ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) } @@ -1000,27 +1041,21 @@ func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { type PanicTestFunc func() // didPanic returns true if the function passed to it panics. Otherwise, it returns false. -func didPanic(f PanicTestFunc) (bool, interface{}, string) { - - didPanic := false - var message interface{} - var stack string - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - stack = string(debug.Stack()) - } - }() - - // call the target function - f() +func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string) { + didPanic = true + defer func() { + message = recover() + if didPanic { + stack = string(debug.Stack()) + } }() - return didPanic, message, stack + // call the target function + f() + didPanic = false + return } // Panics asserts that the code inside the specified PanicTestFunc panics. @@ -1111,6 +1146,27 @@ func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, return true } +// WithinRange asserts that a time is within a time range (inclusive). +// +// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func WithinRange(t TestingT, actual, start, end time.Time, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if end.Before(start) { + return Fail(t, "Start should be before end", msgAndArgs...) + } + + if actual.Before(start) { + return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is before the range", actual, start, end), msgAndArgs...) + } else if actual.After(end) { + return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is after the range", actual, start, end), msgAndArgs...) 
+ } + + return true +} + func toFloat(x interface{}) (float64, bool) { var xf float64 xok := true @@ -1161,11 +1217,15 @@ func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs bf, bok := toFloat(actual) if !aok || !bok { - return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) + return Fail(t, "Parameters must be numerical", msgAndArgs...) + } + + if math.IsNaN(af) && math.IsNaN(bf) { + return true } if math.IsNaN(af) { - return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) + return Fail(t, "Expected must not be NaN", msgAndArgs...) } if math.IsNaN(bf) { @@ -1188,7 +1248,7 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + return Fail(t, "Parameters must be slice", msgAndArgs...) } actualSlice := reflect.ValueOf(actual) @@ -1250,8 +1310,12 @@ func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, m func calcRelativeError(expected, actual interface{}) (float64, error) { af, aok := toFloat(expected) - if !aok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) + bf, bok := toFloat(actual) + if !aok || !bok { + return 0, fmt.Errorf("Parameters must be numerical") + } + if math.IsNaN(af) && math.IsNaN(bf) { + return 0, nil } if math.IsNaN(af) { return 0, errors.New("expected value must not be NaN") @@ -1259,10 +1323,6 @@ func calcRelativeError(expected, actual interface{}) (float64, error) { if af == 0 { return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") } - bf, bok := toFloat(actual) - if !bok { - return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) - } if math.IsNaN(bf) { return 0, errors.New("actual value must not be NaN") } @@ -1298,7 +1358,7 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + return Fail(t, "Parameters must be slice", msgAndArgs...) } actualSlice := reflect.ValueOf(actual) @@ -1375,6 +1435,27 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte return true } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContains(t, err, expectedErrorSubString) +func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !Error(t, theError, msgAndArgs...) { + return false + } + + actual := theError.Error() + if !strings.Contains(actual, contains) { + return Fail(t, fmt.Sprintf("Error %#v does not contain %#v", actual, contains), msgAndArgs...) + } + + return true +} + // matchRegexp return true if a specified regexp matches a string. 
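The InDelta and calcRelativeError changes above make two NaN operands compare as equal rather than failing on the expected value. A hedged sketch, assuming the vendored testify:

package demo

import (
	"math"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestInDeltaNaN(t *testing.T) {
	assert.InDelta(t, math.NaN(), math.NaN(), 0.01) // passes: both operands are NaN
	assert.InDelta(t, 1.0, 1.005, 0.01)             // passes: within the delta
}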
func matchRegexp(rx interface{}, str interface{}) bool { @@ -1588,12 +1669,17 @@ func diff(expected interface{}, actual interface{}) string { } var e, a string - if et != reflect.TypeOf("") { - e = spewConfig.Sdump(expected) - a = spewConfig.Sdump(actual) - } else { + + switch et { + case reflect.TypeOf(""): e = reflect.ValueOf(expected).String() a = reflect.ValueOf(actual).String() + case reflect.TypeOf(time.Time{}): + e = spewConfigStringerEnabled.Sdump(expected) + a = spewConfigStringerEnabled.Sdump(actual) + default: + e = spewConfig.Sdump(expected) + a = spewConfig.Sdump(actual) } diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ @@ -1625,6 +1711,14 @@ var spewConfig = spew.ConfigState{ MaxDepth: 10, } +var spewConfigStringerEnabled = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, + MaxDepth: 10, +} + type tHelper interface { Helper() } diff --git a/agent/vendor/github.com/stretchr/testify/require/require.go b/agent/vendor/github.com/stretchr/testify/require/require.go index 51820df2e67..880853f5a2c 100644 --- a/agent/vendor/github.com/stretchr/testify/require/require.go +++ b/agent/vendor/github.com/stretchr/testify/require/require.go @@ -280,6 +280,36 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int t.FailNow() } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContains(t, err, expectedErrorSubString) +func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorContains(t, theError, contains, msgAndArgs...) { + return + } + t.FailNow() +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorContainsf(t, theError, contains, msg, args...) { + return + } + t.FailNow() +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func ErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) { @@ -1834,6 +1864,32 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim t.FailNow() } +// WithinRange asserts that a time is within a time range (inclusive). +// +// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.WithinRange(t, actual, start, end, msgAndArgs...) { + return + } + t.FailNow() +} + +// WithinRangef asserts that a time is within a time range (inclusive). +// +// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.WithinRangef(t, actual, start, end, msg, args...) 
{ + return + } + t.FailNow() +} + // YAMLEq asserts that two YAML strings are equivalent. func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { diff --git a/agent/vendor/github.com/stretchr/testify/require/require_forward.go b/agent/vendor/github.com/stretchr/testify/require/require_forward.go index ed54a9d83f3..960bf6f2cab 100644 --- a/agent/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/agent/vendor/github.com/stretchr/testify/require/require_forward.go @@ -223,6 +223,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. ErrorAsf(a.t, err, target, msg, args...) } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContains(err, expectedErrorSubString) +func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorContains(a.t, theError, contains, msgAndArgs...) +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") +func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorContainsf(a.t, theError, contains, msg, args...) +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) { @@ -1438,6 +1462,26 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta WithinDurationf(a.t, expected, actual, delta, msg, args...) } +// WithinRange asserts that a time is within a time range (inclusive). +// +// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + WithinRange(a.t, actual, start, end, msgAndArgs...) +} + +// WithinRangef asserts that a time is within a time range (inclusive). +// +// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + WithinRangef(a.t, actual, start, end, msg, args...) +} + // YAMLEq asserts that two YAML strings are equivalent. func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { diff --git a/agent/vendor/github.com/stretchr/testify/suite/suite.go b/agent/vendor/github.com/stretchr/testify/suite/suite.go index b9b5d1c56cf..895591878bf 100644 --- a/agent/vendor/github.com/stretchr/testify/suite/suite.go +++ b/agent/vendor/github.com/stretchr/testify/suite/suite.go @@ -7,6 +7,7 @@ import ( "reflect" "regexp" "runtime/debug" + "sync" "testing" "time" @@ -21,17 +22,22 @@ var matchMethod = flag.String("testify.m", "", "regular expression to select tes // retrieving the current *testing.T context. 
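The suite changes below add a mutex around SetT/T/Require/Assert and tighten panic handling in Run; ordinary suite usage is unchanged. A minimal sketch of that usage, assuming the vendored testify/suite package:

package demo

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

type ExampleSuite struct {
	suite.Suite
}

func (s *ExampleSuite) TestAddition() {
	s.Equal(4, 2+2) // suite.Suite embeds *assert.Assertions
}

func TestExampleSuite(t *testing.T) {
	suite.Run(t, new(ExampleSuite))
}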
type Suite struct { *assert.Assertions + mu sync.RWMutex require *require.Assertions t *testing.T } // T retrieves the current *testing.T context. func (suite *Suite) T() *testing.T { + suite.mu.RLock() + defer suite.mu.RUnlock() return suite.t } // SetT sets the current *testing.T context. func (suite *Suite) SetT(t *testing.T) { + suite.mu.Lock() + defer suite.mu.Unlock() suite.t = t suite.Assertions = assert.New(t) suite.require = require.New(t) @@ -39,6 +45,8 @@ func (suite *Suite) SetT(t *testing.T) { // Require returns a require context for suite. func (suite *Suite) Require() *require.Assertions { + suite.mu.Lock() + defer suite.mu.Unlock() if suite.require == nil { suite.require = require.New(suite.T()) } @@ -51,14 +59,20 @@ func (suite *Suite) Require() *require.Assertions { // assert.Assertions with require.Assertions), this method is provided so you // can call `suite.Assert().NoError()`. func (suite *Suite) Assert() *assert.Assertions { + suite.mu.Lock() + defer suite.mu.Unlock() if suite.Assertions == nil { suite.Assertions = assert.New(suite.T()) } return suite.Assertions } -func failOnPanic(t *testing.T) { +func recoverAndFailOnPanic(t *testing.T) { r := recover() + failOnPanic(t, r) +} + +func failOnPanic(t *testing.T, r interface{}) { if r != nil { t.Errorf("test panicked: %v\n%s", r, debug.Stack()) t.FailNow() @@ -81,7 +95,7 @@ func (suite *Suite) Run(name string, subtest func()) bool { // Run takes a testing suite and runs all of the tests attached // to it. func Run(t *testing.T, suite TestingSuite) { - defer failOnPanic(t) + defer recoverAndFailOnPanic(t) suite.SetT(t) @@ -126,10 +140,12 @@ func Run(t *testing.T, suite TestingSuite) { F: func(t *testing.T) { parentT := suite.T() suite.SetT(t) - defer failOnPanic(t) + defer recoverAndFailOnPanic(t) defer func() { + r := recover() + if stats != nil { - passed := !t.Failed() + passed := !t.Failed() && r == nil stats.end(method.Name, passed) } @@ -142,6 +158,7 @@ func Run(t *testing.T, suite TestingSuite) { } suite.SetT(parentT) + failOnPanic(t, r) }() if setupTestSuite, ok := suite.(SetupTestSuite); ok { diff --git a/agent/vendor/github.com/vishvananda/netlink/.travis.yml b/agent/vendor/github.com/vishvananda/netlink/.travis.yml deleted file mode 100644 index 80219c69d6f..00000000000 --- a/agent/vendor/github.com/vishvananda/netlink/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ -language: go -go: - - "1.12.x" - - "1.13.x" - - "1.14.x" -before_script: - # make sure we keep path in tact when we sudo - - sudo sed -i -e 's/^Defaults\tsecure_path.*$//' /etc/sudoers - # modprobe ip_gre or else the first gre device can't be deleted - - sudo modprobe ip_gre - # modprobe nf_conntrack for the conntrack testing - - sudo modprobe nf_conntrack - - sudo modprobe nf_conntrack_netlink - - sudo modprobe nf_conntrack_ipv4 - - sudo modprobe nf_conntrack_ipv6 - - sudo modprobe sch_hfsc - - sudo modprobe sch_sfq -install: - - go get -v -t ./... 
-go_import_path: github.com/vishvananda/netlink diff --git a/agent/vendor/github.com/vishvananda/netlink/README.md b/agent/vendor/github.com/vishvananda/netlink/README.md index a88e2f41840..0128bc67d55 100644 --- a/agent/vendor/github.com/vishvananda/netlink/README.md +++ b/agent/vendor/github.com/vishvananda/netlink/README.md @@ -1,6 +1,6 @@ # netlink - netlink library for go # -[![Build Status](https://travis-ci.org/vishvananda/netlink.png?branch=master)](https://travis-ci.org/vishvananda/netlink) [![GoDoc](https://godoc.org/github.com/vishvananda/netlink?status.svg)](https://godoc.org/github.com/vishvananda/netlink) +![Build Status](https://github.com/vishvananda/netlink/actions/workflows/main.yml/badge.svg) [![GoDoc](https://godoc.org/github.com/vishvananda/netlink?status.svg)](https://godoc.org/github.com/vishvananda/netlink) The netlink package provides a simple netlink library for go. Netlink is the interface a user-space program in linux uses to communicate with diff --git a/agent/vendor/github.com/vishvananda/netlink/addr_linux.go b/agent/vendor/github.com/vishvananda/netlink/addr_linux.go index 71da251ca96..72862ce1f44 100644 --- a/agent/vendor/github.com/vishvananda/netlink/addr_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/addr_linux.go @@ -176,7 +176,7 @@ func AddrList(link Link, family int) ([]Addr, error) { // The list can be filtered by link and ip family. func (h *Handle) AddrList(link Link, family int) ([]Addr, error) { req := h.newNetlinkRequest(unix.RTM_GETADDR, unix.NLM_F_DUMP) - msg := nl.NewIfInfomsg(family) + msg := nl.NewIfAddrmsg(family) req.AddData(msg) msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWADDR) @@ -268,7 +268,7 @@ func parseAddr(m []byte) (addr Addr, family int, err error) { // But obviously, as there are IPv6 PtP addresses, too, // IFA_LOCAL should also be handled for IPv6. if local != nil { - if family == FAMILY_V4 && local.IP.Equal(dst.IP) { + if family == FAMILY_V4 && dst != nil && local.IP.Equal(dst.IP) { addr.IPNet = dst } else { addr.IPNet = local @@ -296,13 +296,13 @@ type AddrUpdate struct { // AddrSubscribe takes a chan down which notifications will be sent // when addresses change. Close the 'done' chan to stop subscription. func AddrSubscribe(ch chan<- AddrUpdate, done <-chan struct{}) error { - return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0) + return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0, nil) } // AddrSubscribeAt works like AddrSubscribe plus it allows the caller // to choose the network namespace in which to subscribe (ns). 
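Below, AddrSubscribeOptions gains a ReceiveTimeout field that is applied to the netlink socket before listening. A hedged sketch of subscribing with it; the AddrUpdate fields used here are assumed from the existing netlink API:

package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
	"golang.org/x/sys/unix"
)

func main() {
	ch := make(chan netlink.AddrUpdate)
	done := make(chan struct{})
	defer close(done)

	opts := netlink.AddrSubscribeOptions{
		ListExisting:   true,
		ReceiveTimeout: &unix.Timeval{Sec: 30}, // new option added in this hunk
	}
	if err := netlink.AddrSubscribeWithOptions(ch, done, opts); err != nil {
		fmt.Println("subscribe failed:", err)
		return
	}
	for update := range ch {
		fmt.Printf("link %d: %s (new=%v)\n", update.LinkIndex, update.LinkAddress.String(), update.NewAddr)
	}
}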
func AddrSubscribeAt(ns netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}) error { - return addrSubscribeAt(ns, netns.None(), ch, done, nil, false, 0) + return addrSubscribeAt(ns, netns.None(), ch, done, nil, false, 0, nil) } // AddrSubscribeOptions contains a set of options to use with @@ -312,6 +312,7 @@ type AddrSubscribeOptions struct { ErrorCallback func(error) ListExisting bool ReceiveBufferSize int + ReceiveTimeout *unix.Timeval } // AddrSubscribeWithOptions work like AddrSubscribe but enable to @@ -322,14 +323,20 @@ func AddrSubscribeWithOptions(ch chan<- AddrUpdate, done <-chan struct{}, option none := netns.None() options.Namespace = &none } - return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting, options.ReceiveBufferSize) + return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting, options.ReceiveBufferSize, options.ReceiveTimeout) } -func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error), listExisting bool, rcvbuf int) error { +func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error), listExisting bool, rcvbuf int, rcvTimeout *unix.Timeval) error { s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_IPV4_IFADDR, unix.RTNLGRP_IPV6_IFADDR) if err != nil { return err } + if rcvTimeout != nil { + if err := s.SetReceiveTimeout(rcvTimeout); err != nil { + return err + } + } + if done != nil { go func() { <-done @@ -357,7 +364,8 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c msgs, from, err := s.Receive() if err != nil { if cberr != nil { - cberr(err) + cberr(fmt.Errorf("Receive failed: %v", + err)) } return } @@ -372,7 +380,6 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c continue } if m.Header.Type == unix.NLMSG_ERROR { - native := nl.NativeEndian() error := int32(native.Uint32(m.Data[0:4])) if error == 0 { continue diff --git a/agent/vendor/github.com/vishvananda/netlink/bpf_linux.go b/agent/vendor/github.com/vishvananda/netlink/bpf_linux.go index 6631626bfc0..96befbfe0ca 100644 --- a/agent/vendor/github.com/vishvananda/netlink/bpf_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/bpf_linux.go @@ -16,6 +16,30 @@ const ( BPF_PROG_TYPE_SCHED_ACT BPF_PROG_TYPE_TRACEPOINT BPF_PROG_TYPE_XDP + BPF_PROG_TYPE_PERF_EVENT + BPF_PROG_TYPE_CGROUP_SKB + BPF_PROG_TYPE_CGROUP_SOCK + BPF_PROG_TYPE_LWT_IN + BPF_PROG_TYPE_LWT_OUT + BPF_PROG_TYPE_LWT_XMIT + BPF_PROG_TYPE_SOCK_OPS + BPF_PROG_TYPE_SK_SKB + BPF_PROG_TYPE_CGROUP_DEVICE + BPF_PROG_TYPE_SK_MSG + BPF_PROG_TYPE_RAW_TRACEPOINT + BPF_PROG_TYPE_CGROUP_SOCK_ADDR + BPF_PROG_TYPE_LWT_SEG6LOCAL + BPF_PROG_TYPE_LIRC_MODE2 + BPF_PROG_TYPE_SK_REUSEPORT + BPF_PROG_TYPE_FLOW_DISSECTOR + BPF_PROG_TYPE_CGROUP_SYSCTL + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE + BPF_PROG_TYPE_CGROUP_SOCKOPT + BPF_PROG_TYPE_TRACING + BPF_PROG_TYPE_STRUCT_OPS + BPF_PROG_TYPE_EXT + BPF_PROG_TYPE_LSM + BPF_PROG_TYPE_SK_LOOKUP ) type BPFAttr struct { diff --git a/agent/vendor/github.com/vishvananda/netlink/class_linux.go b/agent/vendor/github.com/vishvananda/netlink/class_linux.go index e664ade7f50..6f542ba4e72 100644 --- a/agent/vendor/github.com/vishvananda/netlink/class_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/class_linux.go @@ -176,6 +176,12 @@ func classPayload(req *nl.NetlinkRequest, class Class) error { 
options.AddRtAttr(nl.TCA_HTB_PARMS, opt.Serialize()) options.AddRtAttr(nl.TCA_HTB_RTAB, SerializeRtab(rtab)) options.AddRtAttr(nl.TCA_HTB_CTAB, SerializeRtab(ctab)) + if htb.Rate >= uint64(1<<32) { + options.AddRtAttr(nl.TCA_HTB_RATE64, nl.Uint64Attr(htb.Rate)) + } + if htb.Ceil >= uint64(1<<32) { + options.AddRtAttr(nl.TCA_HTB_CEIL64, nl.Uint64Attr(htb.Ceil)) + } case "hfsc": hfsc := class.(*HfscClass) opt := nl.HfscCopt{} @@ -185,9 +191,9 @@ func classPayload(req *nl.NetlinkRequest, class Class) error { opt.Fsc.Set(fm1/8, fd, fm2/8) um1, ud, um2 := hfsc.Usc.Attrs() opt.Usc.Set(um1/8, ud, um2/8) - nl.NewRtAttrChild(options, nl.TCA_HFSC_RSC, nl.SerializeHfscCurve(&opt.Rsc)) - nl.NewRtAttrChild(options, nl.TCA_HFSC_FSC, nl.SerializeHfscCurve(&opt.Fsc)) - nl.NewRtAttrChild(options, nl.TCA_HFSC_USC, nl.SerializeHfscCurve(&opt.Usc)) + options.AddRtAttr(nl.TCA_HFSC_RSC, nl.SerializeHfscCurve(&opt.Rsc)) + options.AddRtAttr(nl.TCA_HFSC_FSC, nl.SerializeHfscCurve(&opt.Fsc)) + options.AddRtAttr(nl.TCA_HFSC_USC, nl.SerializeHfscCurve(&opt.Usc)) } req.AddData(options) return nil @@ -306,6 +312,10 @@ func parseHtbClassData(class Class, data []syscall.NetlinkRouteAttr) (bool, erro htb.Quantum = opt.Quantum htb.Level = opt.Level htb.Prio = opt.Prio + case nl.TCA_HTB_RATE64: + htb.Rate = native.Uint64(datum.Value[0:8]) + case nl.TCA_HTB_CEIL64: + htb.Ceil = native.Uint64(datum.Value[0:8]) } } return detailed, nil @@ -331,7 +341,6 @@ func parseHfscClassData(class Class, data []syscall.NetlinkRouteAttr) (bool, err func parseTcStats(data []byte) (*ClassStatistics, error) { buf := &bytes.Buffer{} buf.Write(data) - native := nl.NativeEndian() tcStats := &tcStats{} if err := binary.Read(buf, native, tcStats); err != nil { return nil, err @@ -353,7 +362,6 @@ func parseTcStats(data []byte) (*ClassStatistics, error) { func parseGnetStats(data []byte, gnetStats interface{}) error { buf := &bytes.Buffer{} buf.Write(data) - native := nl.NativeEndian() return binary.Read(buf, native, gnetStats) } diff --git a/agent/vendor/github.com/vishvananda/netlink/conntrack_linux.go b/agent/vendor/github.com/vishvananda/netlink/conntrack_linux.go index ab91f4e55be..03ea1b98fc2 100644 --- a/agent/vendor/github.com/vishvananda/netlink/conntrack_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/conntrack_linux.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "net" + "time" "github.com/vishvananda/netlink/nl" "golang.org/x/sys/unix" @@ -145,16 +146,23 @@ type ConntrackFlow struct { Forward ipTuple Reverse ipTuple Mark uint32 + TimeStart uint64 + TimeStop uint64 + TimeOut uint32 } func (s *ConntrackFlow) String() string { // conntrack cmd output: // udp 17 src=127.0.0.1 dst=127.0.0.1 sport=4001 dport=1234 packets=5 bytes=532 [UNREPLIED] src=127.0.0.1 dst=127.0.0.1 sport=1234 dport=4001 packets=10 bytes=1078 mark=0 - return fmt.Sprintf("%s\t%d src=%s dst=%s sport=%d dport=%d packets=%d bytes=%d\tsrc=%s dst=%s sport=%d dport=%d packets=%d bytes=%d mark=%d", + // start=2019-07-26 01:26:21.557800506 +0000 UTC stop=1970-01-01 00:00:00 +0000 UTC timeout=30(sec) + start := time.Unix(0, int64(s.TimeStart)) + stop := time.Unix(0, int64(s.TimeStop)) + timeout := int32(s.TimeOut) + return fmt.Sprintf("%s\t%d src=%s dst=%s sport=%d dport=%d packets=%d bytes=%d\tsrc=%s dst=%s sport=%d dport=%d packets=%d bytes=%d mark=0x%x start=%v stop=%v timeout=%d(sec)", nl.L4ProtoMap[s.Forward.Protocol], s.Forward.Protocol, s.Forward.SrcIP.String(), s.Forward.DstIP.String(), s.Forward.SrcPort, s.Forward.DstPort, s.Forward.Packets, s.Forward.Bytes, 
s.Reverse.SrcIP.String(), s.Reverse.DstIP.String(), s.Reverse.SrcPort, s.Reverse.DstPort, s.Reverse.Packets, s.Reverse.Bytes, - s.Mark) + s.Mark, start, stop, timeout) } // This method parse the ip tuple structure @@ -174,25 +182,43 @@ func parseIpTuple(reader *bytes.Reader, tpl *ipTuple) uint8 { tpl.DstIP = v } } - // Skip the next 4 bytes nl.NLA_F_NESTED|nl.CTA_TUPLE_PROTO - reader.Seek(4, seekCurrent) - _, t, _, v := parseNfAttrTLV(reader) + // Get total length of nested protocol-specific info. + _, _, protoInfoTotalLen := parseNfAttrTL(reader) + _, t, l, v := parseNfAttrTLV(reader) + // Track the number of bytes read. + protoInfoBytesRead := uint16(nl.SizeofNfattr) + l if t == nl.CTA_PROTO_NUM { tpl.Protocol = uint8(v[0]) } - // Skip some padding 3 bytes + // We only parse TCP & UDP headers. Skip the others. + if tpl.Protocol != 6 && tpl.Protocol != 17 { + // skip the rest + bytesRemaining := protoInfoTotalLen - protoInfoBytesRead + reader.Seek(int64(bytesRemaining), seekCurrent) + return tpl.Protocol + } + // Skip 3 bytes of padding reader.Seek(3, seekCurrent) + protoInfoBytesRead += 3 for i := 0; i < 2; i++ { _, t, _ := parseNfAttrTL(reader) + protoInfoBytesRead += uint16(nl.SizeofNfattr) switch t { case nl.CTA_PROTO_SRC_PORT: parseBERaw16(reader, &tpl.SrcPort) + protoInfoBytesRead += 2 case nl.CTA_PROTO_DST_PORT: parseBERaw16(reader, &tpl.DstPort) + protoInfoBytesRead += 2 } - // Skip some padding 2 byte + // Skip 2 bytes of padding reader.Seek(2, seekCurrent) + protoInfoBytesRead += 2 } + // Skip any remaining/unknown parts of the message + bytesRemaining := protoInfoTotalLen - protoInfoBytesRead + reader.Seek(int64(bytesRemaining), seekCurrent) + return tpl.Protocol } @@ -211,10 +237,14 @@ func parseNfAttrTL(r *bytes.Reader) (isNested bool, attrType, len uint16) { binary.Read(r, nl.NativeEndian(), &attrType) isNested = (attrType & nl.NLA_F_NESTED) == nl.NLA_F_NESTED attrType = attrType & (nl.NLA_F_NESTED - 1) - return isNested, attrType, len } +func skipNfAttrValue(r *bytes.Reader, len uint16) { + len = (len + nl.NLA_ALIGNTO - 1) & ^(nl.NLA_ALIGNTO - 1) + r.Seek(int64(len), seekCurrent) +} + func parseBERaw16(r *bytes.Reader, v *uint16) { binary.Read(r, binary.BigEndian, v) } @@ -241,6 +271,36 @@ func parseByteAndPacketCounters(r *bytes.Reader) (bytes, packets uint64) { return } +// when the flow is alive, only the timestamp_start is returned in structure +func parseTimeStamp(r *bytes.Reader, readSize uint16) (tstart, tstop uint64) { + var numTimeStamps int + oneItem := nl.SizeofNfattr + 8 // 4 bytes attr header + 8 bytes timestamp + if readSize == uint16(oneItem) { + numTimeStamps = 1 + } else if readSize == 2*uint16(oneItem) { + numTimeStamps = 2 + } else { + return + } + for i := 0; i < numTimeStamps; i++ { + switch _, t, _ := parseNfAttrTL(r); t { + case nl.CTA_TIMESTAMP_START: + parseBERaw64(r, &tstart) + case nl.CTA_TIMESTAMP_STOP: + parseBERaw64(r, &tstop) + default: + return + } + } + return + +} + +func parseTimeOut(r *bytes.Reader) (ttimeout uint32) { + parseBERaw32(r, &ttimeout) + return +} + func parseConnectionMark(r *bytes.Reader) (mark uint32) { parseBERaw32(r, &mark) return @@ -266,25 +326,37 @@ func parseRawData(data []byte) *ConntrackFlow { if nested, t, l := parseNfAttrTL(reader); nested { switch t { case nl.CTA_TUPLE_ORIG: - if nested, t, _ = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP { + if nested, t, l = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP { parseIpTuple(reader, &s.Forward) } case nl.CTA_TUPLE_REPLY: - if nested, t, _ = 
parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP { + if nested, t, l = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP { parseIpTuple(reader, &s.Reverse) } else { // Header not recognized skip it - reader.Seek(int64(l), seekCurrent) + skipNfAttrValue(reader, l) } case nl.CTA_COUNTERS_ORIG: s.Forward.Bytes, s.Forward.Packets = parseByteAndPacketCounters(reader) case nl.CTA_COUNTERS_REPLY: s.Reverse.Bytes, s.Reverse.Packets = parseByteAndPacketCounters(reader) + case nl.CTA_TIMESTAMP: + s.TimeStart, s.TimeStop = parseTimeStamp(reader, l) + case nl.CTA_PROTOINFO: + skipNfAttrValue(reader, l) + default: + skipNfAttrValue(reader, l) } } else { switch t { case nl.CTA_MARK: s.Mark = parseConnectionMark(reader) + case nl.CTA_TIMEOUT: + s.TimeOut = parseTimeOut(reader) + case nl.CTA_STATUS, nl.CTA_USE, nl.CTA_ID: + skipNfAttrValue(reader, l) + default: + skipNfAttrValue(reader, l) } } } @@ -346,23 +418,34 @@ type CustomConntrackFilter interface { } type ConntrackFilter struct { - ipFilter map[ConntrackFilterType]net.IP + ipNetFilter map[ConntrackFilterType]*net.IPNet portFilter map[ConntrackFilterType]uint16 protoFilter uint8 } -// AddIP adds an IP to the conntrack filter -func (f *ConntrackFilter) AddIP(tp ConntrackFilterType, ip net.IP) error { - if f.ipFilter == nil { - f.ipFilter = make(map[ConntrackFilterType]net.IP) +// AddIPNet adds a IP subnet to the conntrack filter +func (f *ConntrackFilter) AddIPNet(tp ConntrackFilterType, ipNet *net.IPNet) error { + if ipNet == nil { + return fmt.Errorf("Filter attribute empty") } - if _, ok := f.ipFilter[tp]; ok { + if f.ipNetFilter == nil { + f.ipNetFilter = make(map[ConntrackFilterType]*net.IPNet) + } + if _, ok := f.ipNetFilter[tp]; ok { return errors.New("Filter attribute already present") } - f.ipFilter[tp] = ip + f.ipNetFilter[tp] = ipNet return nil } +// AddIP adds an IP to the conntrack filter +func (f *ConntrackFilter) AddIP(tp ConntrackFilterType, ip net.IP) error { + if ip == nil { + return fmt.Errorf("Filter attribute empty") + } + return f.AddIPNet(tp, NewIPNet(ip)) +} + // AddPort adds a Port to the conntrack filter if the Layer 4 protocol allows it func (f *ConntrackFilter) AddPort(tp ConntrackFilterType, port uint16) error { switch f.protoFilter { @@ -394,7 +477,7 @@ func (f *ConntrackFilter) AddProtocol(proto uint8) error { // MatchConntrackFlow applies the filter to the flow and returns true if the flow matches the filter // false otherwise func (f *ConntrackFilter) MatchConntrackFlow(flow *ConntrackFlow) bool { - if len(f.ipFilter) == 0 && len(f.portFilter) == 0 && f.protoFilter == 0 { + if len(f.ipNetFilter) == 0 && len(f.portFilter) == 0 && f.protoFilter == 0 { // empty filter always not match return false } @@ -408,30 +491,30 @@ func (f *ConntrackFilter) MatchConntrackFlow(flow *ConntrackFlow) bool { match := true // IP conntrack filter - if len(f.ipFilter) > 0 { + if len(f.ipNetFilter) > 0 { // -orig-src ip Source address from original direction - if elem, found := f.ipFilter[ConntrackOrigSrcIP]; found { - match = match && elem.Equal(flow.Forward.SrcIP) + if elem, found := f.ipNetFilter[ConntrackOrigSrcIP]; found { + match = match && elem.Contains(flow.Forward.SrcIP) } // -orig-dst ip Destination address from original direction - if elem, found := f.ipFilter[ConntrackOrigDstIP]; match && found { - match = match && elem.Equal(flow.Forward.DstIP) + if elem, found := f.ipNetFilter[ConntrackOrigDstIP]; match && found { + match = match && elem.Contains(flow.Forward.DstIP) } // -src-nat ip Source NAT ip - if elem, found := 
f.ipFilter[ConntrackReplySrcIP]; match && found { - match = match && elem.Equal(flow.Reverse.SrcIP) + if elem, found := f.ipNetFilter[ConntrackReplySrcIP]; match && found { + match = match && elem.Contains(flow.Reverse.SrcIP) } // -dst-nat ip Destination NAT ip - if elem, found := f.ipFilter[ConntrackReplyDstIP]; match && found { - match = match && elem.Equal(flow.Reverse.DstIP) + if elem, found := f.ipNetFilter[ConntrackReplyDstIP]; match && found { + match = match && elem.Contains(flow.Reverse.DstIP) } // Match source or destination reply IP - if elem, found := f.ipFilter[ConntrackReplyAnyIP]; match && found { - match = match && (elem.Equal(flow.Reverse.SrcIP) || elem.Equal(flow.Reverse.DstIP)) + if elem, found := f.ipNetFilter[ConntrackReplyAnyIP]; match && found { + match = match && (elem.Contains(flow.Reverse.SrcIP) || elem.Contains(flow.Reverse.DstIP)) } } diff --git a/agent/vendor/github.com/vishvananda/netlink/devlink_linux.go b/agent/vendor/github.com/vishvananda/netlink/devlink_linux.go index 29b3f8ec1d2..358b232c6c5 100644 --- a/agent/vendor/github.com/vishvananda/netlink/devlink_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/devlink_linux.go @@ -1,9 +1,11 @@ package netlink import ( + "fmt" + "net" + "strings" "syscall" - "fmt" "github.com/vishvananda/netlink/nl" "golang.org/x/sys/unix" ) @@ -27,6 +29,61 @@ type DevlinkDevice struct { Attrs DevlinkDevAttrs } +// DevlinkPortFn represents port function and its attributes +type DevlinkPortFn struct { + HwAddr net.HardwareAddr + State uint8 + OpState uint8 +} + +// DevlinkPortFnSetAttrs represents attributes to set +type DevlinkPortFnSetAttrs struct { + FnAttrs DevlinkPortFn + HwAddrValid bool + StateValid bool +} + +// DevlinkPort represents port and its attributes +type DevlinkPort struct { + BusName string + DeviceName string + PortIndex uint32 + PortType uint16 + NetdeviceName string + NetdevIfIndex uint32 + RdmaDeviceName string + PortFlavour uint16 + Fn *DevlinkPortFn +} + +type DevLinkPortAddAttrs struct { + Controller uint32 + SfNumber uint32 + PortIndex uint32 + PfNumber uint16 + SfNumberValid bool + PortIndexValid bool + ControllerValid bool +} + +// DevlinkDeviceInfo represents devlink info +type DevlinkDeviceInfo struct { + Driver string + SerialNumber string + BoardID string + FwApp string + FwAppBoundleID string + FwAppName string + FwBoundleID string + FwMgmt string + FwMgmtAPI string + FwMgmtBuild string + FwNetlist string + FwNetlistBuild string + FwPsidAPI string + FwUndi string +} + func parseDevLinkDeviceList(msgs [][]byte) ([]*DevlinkDevice, error) { devices := make([]*DevlinkDevice, 0, len(msgs)) for _, m := range msgs { @@ -95,9 +152,9 @@ func (d *DevlinkDevice) parseAttributes(attrs []syscall.NetlinkRouteAttr) error for _, a := range attrs { switch a.Attr.Type { case nl.DEVLINK_ATTR_BUS_NAME: - d.BusName = string(a.Value) + d.BusName = string(a.Value[:len(a.Value)-1]) case nl.DEVLINK_ATTR_DEV_NAME: - d.DeviceName = string(a.Value) + d.DeviceName = string(a.Value[:len(a.Value)-1]) case nl.DEVLINK_ATTR_ESWITCH_MODE: d.Attrs.Eswitch.Mode = parseEswitchMode(native.Uint16(a.Value)) case nl.DEVLINK_ATTR_ESWITCH_INLINE_MODE: @@ -126,12 +183,12 @@ func (h *Handle) getEswitchAttrs(family *GenlFamily, dev *DevlinkDevice) { req := h.newNetlinkRequest(int(family.ID), unix.NLM_F_REQUEST|unix.NLM_F_ACK) req.AddData(msg) - b := make([]byte, len(dev.BusName)) + b := make([]byte, len(dev.BusName)+1) copy(b, dev.BusName) data := nl.NewRtAttr(nl.DEVLINK_ATTR_BUS_NAME, b) req.AddData(data) - b = make([]byte, 
len(dev.DeviceName)) + b = make([]byte, len(dev.DeviceName)+1) copy(b, dev.DeviceName) data = nl.NewRtAttr(nl.DEVLINK_ATTR_DEV_NAME, b) req.AddData(data) @@ -270,3 +327,402 @@ func (h *Handle) DevLinkSetEswitchMode(Dev *DevlinkDevice, NewMode string) error func DevLinkSetEswitchMode(Dev *DevlinkDevice, NewMode string) error { return pkgHandle.DevLinkSetEswitchMode(Dev, NewMode) } + +func (port *DevlinkPort) parseAttributes(attrs []syscall.NetlinkRouteAttr) error { + for _, a := range attrs { + switch a.Attr.Type { + case nl.DEVLINK_ATTR_BUS_NAME: + port.BusName = string(a.Value[:len(a.Value)-1]) + case nl.DEVLINK_ATTR_DEV_NAME: + port.DeviceName = string(a.Value[:len(a.Value)-1]) + case nl.DEVLINK_ATTR_PORT_INDEX: + port.PortIndex = native.Uint32(a.Value) + case nl.DEVLINK_ATTR_PORT_TYPE: + port.PortType = native.Uint16(a.Value) + case nl.DEVLINK_ATTR_PORT_NETDEV_NAME: + port.NetdeviceName = string(a.Value[:len(a.Value)-1]) + case nl.DEVLINK_ATTR_PORT_NETDEV_IFINDEX: + port.NetdevIfIndex = native.Uint32(a.Value) + case nl.DEVLINK_ATTR_PORT_IBDEV_NAME: + port.RdmaDeviceName = string(a.Value[:len(a.Value)-1]) + case nl.DEVLINK_ATTR_PORT_FLAVOUR: + port.PortFlavour = native.Uint16(a.Value) + case nl.DEVLINK_ATTR_PORT_FUNCTION: + port.Fn = &DevlinkPortFn{} + for nested := range nl.ParseAttributes(a.Value) { + switch nested.Type { + case nl.DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR: + port.Fn.HwAddr = nested.Value[:] + case nl.DEVLINK_PORT_FN_ATTR_STATE: + port.Fn.State = uint8(nested.Value[0]) + case nl.DEVLINK_PORT_FN_ATTR_OPSTATE: + port.Fn.OpState = uint8(nested.Value[0]) + } + } + } + } + return nil +} + +func parseDevLinkAllPortList(msgs [][]byte) ([]*DevlinkPort, error) { + ports := make([]*DevlinkPort, 0, len(msgs)) + for _, m := range msgs { + attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:]) + if err != nil { + return nil, err + } + port := &DevlinkPort{} + if err = port.parseAttributes(attrs); err != nil { + return nil, err + } + ports = append(ports, port) + } + return ports, nil +} + +// DevLinkGetPortList provides a pointer to devlink ports and nil error, +// otherwise returns an error code. +func (h *Handle) DevLinkGetAllPortList() ([]*DevlinkPort, error) { + f, err := h.GenlFamilyGet(nl.GENL_DEVLINK_NAME) + if err != nil { + return nil, err + } + msg := &nl.Genlmsg{ + Command: nl.DEVLINK_CMD_PORT_GET, + Version: nl.GENL_DEVLINK_VERSION, + } + req := h.newNetlinkRequest(int(f.ID), + unix.NLM_F_REQUEST|unix.NLM_F_ACK|unix.NLM_F_DUMP) + req.AddData(msg) + msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + ports, err := parseDevLinkAllPortList(msgs) + if err != nil { + return nil, err + } + return ports, nil +} + +// DevLinkGetPortList provides a pointer to devlink ports and nil error, +// otherwise returns an error code. +func DevLinkGetAllPortList() ([]*DevlinkPort, error) { + return pkgHandle.DevLinkGetAllPortList() +} + +func parseDevlinkPortMsg(msgs [][]byte) (*DevlinkPort, error) { + m := msgs[0] + attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:]) + if err != nil { + return nil, err + } + port := &DevlinkPort{} + if err = port.parseAttributes(attrs); err != nil { + return nil, err + } + return port, nil +} + +// DevLinkGetPortByIndexprovides a pointer to devlink device and nil error, +// otherwise returns an error code. 
+func (h *Handle) DevLinkGetPortByIndex(Bus string, Device string, PortIndex uint32) (*DevlinkPort, error) { + + _, req, err := h.createCmdReq(nl.DEVLINK_CMD_PORT_GET, Bus, Device) + if err != nil { + return nil, err + } + + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_INDEX, nl.Uint32Attr(PortIndex))) + + respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + port, err := parseDevlinkPortMsg(respmsg) + return port, err +} + +// DevLinkGetPortByIndex provides a pointer to devlink portand nil error, +// otherwise returns an error code. +func DevLinkGetPortByIndex(Bus string, Device string, PortIndex uint32) (*DevlinkPort, error) { + return pkgHandle.DevLinkGetPortByIndex(Bus, Device, PortIndex) +} + +// DevLinkPortAdd adds a devlink port and returns a port on success +// otherwise returns nil port and an error code. +func (h *Handle) DevLinkPortAdd(Bus string, Device string, Flavour uint16, Attrs DevLinkPortAddAttrs) (*DevlinkPort, error) { + _, req, err := h.createCmdReq(nl.DEVLINK_CMD_PORT_NEW, Bus, Device) + if err != nil { + return nil, err + } + + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_FLAVOUR, nl.Uint16Attr(Flavour))) + + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_PCI_PF_NUMBER, nl.Uint16Attr(Attrs.PfNumber))) + if Flavour == nl.DEVLINK_PORT_FLAVOUR_PCI_SF && Attrs.SfNumberValid { + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_PCI_SF_NUMBER, nl.Uint32Attr(Attrs.SfNumber))) + } + if Attrs.PortIndexValid { + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_INDEX, nl.Uint32Attr(Attrs.PortIndex))) + } + if Attrs.ControllerValid { + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_CONTROLLER_NUMBER, nl.Uint32Attr(Attrs.Controller))) + } + respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + port, err := parseDevlinkPortMsg(respmsg) + return port, err +} + +// DevLinkPortAdd adds a devlink port and returns a port on success +// otherwise returns nil port and an error code. +func DevLinkPortAdd(Bus string, Device string, Flavour uint16, Attrs DevLinkPortAddAttrs) (*DevlinkPort, error) { + return pkgHandle.DevLinkPortAdd(Bus, Device, Flavour, Attrs) +} + +// DevLinkPortDel deletes a devlink port and returns success or error code. +func (h *Handle) DevLinkPortDel(Bus string, Device string, PortIndex uint32) error { + _, req, err := h.createCmdReq(nl.DEVLINK_CMD_PORT_DEL, Bus, Device) + if err != nil { + return err + } + + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_INDEX, nl.Uint32Attr(PortIndex))) + _, err = req.Execute(unix.NETLINK_GENERIC, 0) + return err +} + +// DevLinkPortDel deletes a devlink port and returns success or error code. +func DevLinkPortDel(Bus string, Device string, PortIndex uint32) error { + return pkgHandle.DevLinkPortDel(Bus, Device, PortIndex) +} + +// DevlinkPortFnSet sets one or more port function attributes specified by the attribute mask. +// It returns 0 on success or error code. 
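The port hunks above add devlink port management on top of createCmdReq: DevLinkGetAllPortList dumps DEVLINK_CMD_PORT_GET, while DevLinkPortAdd and DevLinkPortDel drive DEVLINK_CMD_PORT_NEW/DEL. A minimal usage sketch, assuming the vendored package is imported as netlink and a hypothetical SF-capable device at bus "pci", device "0000:03:00.0":

package main

import (
	"fmt"
	"log"

	"github.com/vishvananda/netlink"
	"github.com/vishvananda/netlink/nl"
)

func main() {
	// Dump every devlink port known to the kernel.
	ports, err := netlink.DevLinkGetAllPortList()
	if err != nil {
		log.Fatalf("port list: %v", err)
	}
	for _, p := range ports {
		fmt.Printf("%s/%s port %d flavour %d netdev %q\n",
			p.BusName, p.DeviceName, p.PortIndex, p.PortFlavour, p.NetdeviceName)
	}

	// Add a PCI SF port on the (hypothetical) device, then remove it again.
	attrs := netlink.DevLinkPortAddAttrs{
		PfNumber:      0,
		SfNumber:      42, // hypothetical SF number
		SfNumberValid: true,
	}
	port, err := netlink.DevLinkPortAdd("pci", "0000:03:00.0",
		uint16(nl.DEVLINK_PORT_FLAVOUR_PCI_SF), attrs)
	if err != nil {
		log.Fatalf("port add: %v", err)
	}
	defer netlink.DevLinkPortDel("pci", "0000:03:00.0", port.PortIndex)
}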
+func (h *Handle) DevlinkPortFnSet(Bus string, Device string, PortIndex uint32, FnAttrs DevlinkPortFnSetAttrs) error { + _, req, err := h.createCmdReq(nl.DEVLINK_CMD_PORT_SET, Bus, Device) + if err != nil { + return err + } + + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_INDEX, nl.Uint32Attr(PortIndex))) + + fnAttr := nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_FUNCTION|unix.NLA_F_NESTED, nil) + + if FnAttrs.HwAddrValid { + fnAttr.AddRtAttr(nl.DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR, []byte(FnAttrs.FnAttrs.HwAddr)) + } + + if FnAttrs.StateValid { + fnAttr.AddRtAttr(nl.DEVLINK_PORT_FN_ATTR_STATE, nl.Uint8Attr(FnAttrs.FnAttrs.State)) + } + req.AddData(fnAttr) + + _, err = req.Execute(unix.NETLINK_GENERIC, 0) + return err +} + +// DevlinkPortFnSet sets one or more port function attributes specified by the attribute mask. +// It returns 0 on success or error code. +func DevlinkPortFnSet(Bus string, Device string, PortIndex uint32, FnAttrs DevlinkPortFnSetAttrs) error { + return pkgHandle.DevlinkPortFnSet(Bus, Device, PortIndex, FnAttrs) +} + +// devlinkInfoGetter is function that is responsible for getting devlink info message +// this is introduced for test purpose +type devlinkInfoGetter func(bus, device string) ([]byte, error) + +// DevlinkGetDeviceInfoByName returns devlink info for selected device, +// otherwise returns an error code. +// Equivalent to: `devlink dev info $dev` +func (h *Handle) DevlinkGetDeviceInfoByName(Bus string, Device string, getInfoMsg devlinkInfoGetter) (*DevlinkDeviceInfo, error) { + info, err := h.DevlinkGetDeviceInfoByNameAsMap(Bus, Device, getInfoMsg) + if err != nil { + return nil, err + } + + return parseInfoData(info), nil +} + +// DevlinkGetDeviceInfoByName returns devlink info for selected device, +// otherwise returns an error code. +// Equivalent to: `devlink dev info $dev` +func DevlinkGetDeviceInfoByName(Bus string, Device string) (*DevlinkDeviceInfo, error) { + return pkgHandle.DevlinkGetDeviceInfoByName(Bus, Device, pkgHandle.getDevlinkInfoMsg) +} + +// DevlinkGetDeviceInfoByNameAsMap returns devlink info for selected device as a map, +// otherwise returns an error code. +// Equivalent to: `devlink dev info $dev` +func (h *Handle) DevlinkGetDeviceInfoByNameAsMap(Bus string, Device string, getInfoMsg devlinkInfoGetter) (map[string]string, error) { + response, err := getInfoMsg(Bus, Device) + if err != nil { + return nil, err + } + + info, err := parseInfoMsg(response) + if err != nil { + return nil, err + } + + return info, nil +} + +// DevlinkGetDeviceInfoByNameAsMap returns devlink info for selected device as a map, +// otherwise returns an error code. +// Equivalent to: `devlink dev info $dev` +func DevlinkGetDeviceInfoByNameAsMap(Bus string, Device string) (map[string]string, error) { + return pkgHandle.DevlinkGetDeviceInfoByNameAsMap(Bus, Device, pkgHandle.getDevlinkInfoMsg) +} + +// GetDevlinkInfo returns devlink info for target device, +// otherwise returns an error code. +func (d *DevlinkDevice) GetDevlinkInfo() (*DevlinkDeviceInfo, error) { + return pkgHandle.DevlinkGetDeviceInfoByName(d.BusName, d.DeviceName, pkgHandle.getDevlinkInfoMsg) +} + +// GetDevlinkInfoAsMap returns devlink info for target device as a map, +// otherwise returns an error code. 
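DevlinkGetDeviceInfoByName and its *AsMap variant added above wrap DEVLINK_CMD_INFO_GET and feed parseInfoMsg/parseInfoData. A short sketch of reading driver and firmware versions, again with a hypothetical pci/0000:03:00.0 device:

package main

import (
	"fmt"
	"log"

	"github.com/vishvananda/netlink"
)

func main() {
	// Structured view: Driver, SerialNumber, FwMgmt, ... are filled from the
	// DEVLINK_ATTR_INFO_* attributes mapped in parseInfoData.
	info, err := netlink.DevlinkGetDeviceInfoByName("pci", "0000:03:00.0")
	if err != nil {
		log.Fatalf("devlink info: %v", err)
	}
	fmt.Printf("driver=%s serial=%s fw.mgmt=%s\n", info.Driver, info.SerialNumber, info.FwMgmt)

	// Raw view: every reported version key, e.g. "fw.psid.api" or "board.id".
	raw, err := netlink.DevlinkGetDeviceInfoByNameAsMap("pci", "0000:03:00.0")
	if err != nil {
		log.Fatalf("devlink info map: %v", err)
	}
	for k, v := range raw {
		fmt.Printf("%s = %s\n", k, v)
	}
}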
+func (d *DevlinkDevice) GetDevlinkInfoAsMap() (map[string]string, error) { + return pkgHandle.DevlinkGetDeviceInfoByNameAsMap(d.BusName, d.DeviceName, pkgHandle.getDevlinkInfoMsg) +} + +func (h *Handle) getDevlinkInfoMsg(bus, device string) ([]byte, error) { + _, req, err := h.createCmdReq(nl.DEVLINK_CMD_INFO_GET, bus, device) + if err != nil { + return nil, err + } + + response, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + + if len(response) < 1 { + return nil, fmt.Errorf("getDevlinkInfoMsg: message too short") + } + + return response[0], nil +} + +func parseInfoMsg(msg []byte) (map[string]string, error) { + if len(msg) < nl.SizeofGenlmsg { + return nil, fmt.Errorf("parseInfoMsg: message too short") + } + + info := make(map[string]string) + err := collectInfoData(msg[nl.SizeofGenlmsg:], info) + + if err != nil { + return nil, err + } + + return info, nil +} + +func collectInfoData(msg []byte, data map[string]string) error { + attrs, err := nl.ParseRouteAttr(msg) + if err != nil { + return err + } + + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.DEVLINK_ATTR_INFO_DRIVER_NAME: + data["driver"] = parseInfoValue(attr.Value) + case nl.DEVLINK_ATTR_INFO_SERIAL_NUMBER: + data["serialNumber"] = parseInfoValue(attr.Value) + case nl.DEVLINK_ATTR_INFO_VERSION_RUNNING, nl.DEVLINK_ATTR_INFO_VERSION_FIXED, + nl.DEVLINK_ATTR_INFO_VERSION_STORED: + key, value, err := getNestedInfoData(attr.Value) + if err != nil { + return err + } + data[key] = value + } + } + + if len(data) == 0 { + return fmt.Errorf("collectInfoData: could not read attributes") + } + + return nil +} + +func getNestedInfoData(msg []byte) (string, string, error) { + nestedAttrs, err := nl.ParseRouteAttr(msg) + + var key, value string + + if err != nil { + return "", "", err + } + + if len(nestedAttrs) != 2 { + return "", "", fmt.Errorf("getNestedInfoData: too few attributes in nested structure") + } + + for _, nestedAttr := range nestedAttrs { + switch nestedAttr.Attr.Type { + case nl.DEVLINK_ATTR_INFO_VERSION_NAME: + key = parseInfoValue(nestedAttr.Value) + case nl.DEVLINK_ATTR_INFO_VERSION_VALUE: + value = parseInfoValue(nestedAttr.Value) + } + } + + if key == "" { + return "", "", fmt.Errorf("getNestedInfoData: key not found") + } + + if value == "" { + return "", "", fmt.Errorf("getNestedInfoData: value not found") + } + + return key, value, nil +} + +func parseInfoData(data map[string]string) *DevlinkDeviceInfo { + info := new(DevlinkDeviceInfo) + for key, value := range data { + switch key { + case "driver": + info.Driver = value + case "serialNumber": + info.SerialNumber = value + case "board.id": + info.BoardID = value + case "fw.app": + info.FwApp = value + case "fw.app.bundle_id": + info.FwAppBoundleID = value + case "fw.app.name": + info.FwAppName = value + case "fw.bundle_id": + info.FwBoundleID = value + case "fw.mgmt": + info.FwMgmt = value + case "fw.mgmt.api": + info.FwMgmtAPI = value + case "fw.mgmt.build": + info.FwMgmtBuild = value + case "fw.netlist": + info.FwNetlist = value + case "fw.netlist.build": + info.FwNetlistBuild = value + case "fw.psid.api": + info.FwPsidAPI = value + case "fw.undi": + info.FwUndi = value + } + } + return info +} + +func parseInfoValue(value []byte) string { + v := strings.ReplaceAll(string(value), "\x00", "") + return strings.TrimSpace(v) +} diff --git a/agent/vendor/github.com/vishvananda/netlink/filter.go b/agent/vendor/github.com/vishvananda/netlink/filter.go index 2dc34b99592..2d798b0fbbc 100644 --- 
a/agent/vendor/github.com/vishvananda/netlink/filter.go +++ b/agent/vendor/github.com/vishvananda/netlink/filter.go @@ -157,6 +157,39 @@ func NewConnmarkAction() *ConnmarkAction { } } +type CsumUpdateFlags uint32 + +const ( + TCA_CSUM_UPDATE_FLAG_IPV4HDR CsumUpdateFlags = 1 + TCA_CSUM_UPDATE_FLAG_ICMP CsumUpdateFlags = 2 + TCA_CSUM_UPDATE_FLAG_IGMP CsumUpdateFlags = 4 + TCA_CSUM_UPDATE_FLAG_TCP CsumUpdateFlags = 8 + TCA_CSUM_UPDATE_FLAG_UDP CsumUpdateFlags = 16 + TCA_CSUM_UPDATE_FLAG_UDPLITE CsumUpdateFlags = 32 + TCA_CSUM_UPDATE_FLAG_SCTP CsumUpdateFlags = 64 +) + +type CsumAction struct { + ActionAttrs + UpdateFlags CsumUpdateFlags +} + +func (action *CsumAction) Type() string { + return "csum" +} + +func (action *CsumAction) Attrs() *ActionAttrs { + return &action.ActionAttrs +} + +func NewCsumAction() *CsumAction { + return &CsumAction{ + ActionAttrs: ActionAttrs{ + Action: TC_ACT_PIPE, + }, + } +} + type MirredAct uint8 func (a MirredAct) String() string { @@ -260,6 +293,40 @@ func NewSkbEditAction() *SkbEditAction { } } +type PoliceAction struct { + ActionAttrs + Rate uint32 // in byte per second + Burst uint32 // in byte + RCellLog int + Mtu uint32 + Mpu uint16 // in byte + PeakRate uint32 // in byte per second + PCellLog int + AvRate uint32 // in byte per second + Overhead uint16 + LinkLayer int + ExceedAction TcPolAct + NotExceedAction TcPolAct +} + +func (action *PoliceAction) Type() string { + return "police" +} + +func (action *PoliceAction) Attrs() *ActionAttrs { + return &action.ActionAttrs +} + +func NewPoliceAction() *PoliceAction { + return &PoliceAction{ + RCellLog: -1, + PCellLog: -1, + LinkLayer: 1, // ETHERNET + ExceedAction: TC_POLICE_RECLASSIFY, + NotExceedAction: TC_POLICE_OK, + } +} + // MatchAll filters match all packets type MatchAll struct { FilterAttrs @@ -275,20 +342,20 @@ func (filter *MatchAll) Type() string { return "matchall" } -type FilterFwAttrs struct { - ClassId uint32 - InDev string - Mask uint32 - Index uint32 - Buffer uint32 - Mtu uint32 - Mpu uint16 - Rate uint32 - AvRate uint32 - PeakRate uint32 - Action TcPolAct - Overhead uint16 - LinkLayer int +type FwFilter struct { + FilterAttrs + ClassId uint32 + InDev string + Mask uint32 + Police *PoliceAction +} + +func (filter *FwFilter) Attrs() *FilterAttrs { + return &filter.FilterAttrs +} + +func (filter *FwFilter) Type() string { + return "fw" } type BpfFilter struct { diff --git a/agent/vendor/github.com/vishvananda/netlink/filter_linux.go b/agent/vendor/github.com/vishvananda/netlink/filter_linux.go index ef6fabe8114..4c6d1cf7d7f 100644 --- a/agent/vendor/github.com/vishvananda/netlink/filter_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/filter_linux.go @@ -6,6 +6,7 @@ import ( "encoding/hex" "errors" "fmt" + "net" "syscall" "github.com/vishvananda/netlink/nl" @@ -36,6 +37,7 @@ type U32 struct { ClassId uint32 Divisor uint32 // Divisor MUST be power of 2. 
Hash uint32 + Link uint32 RedirIndex int Sel *TcU32Sel Actions []Action @@ -49,74 +51,129 @@ func (filter *U32) Type() string { return "u32" } -// Fw filter filters on firewall marks -// NOTE: this is in filter_linux because it refers to nl.TcPolice which -// is defined in nl/tc_linux.go -type Fw struct { +type Flower struct { FilterAttrs - ClassId uint32 - // TODO remove nl type from interface - Police nl.TcPolice - InDev string - // TODO Action - Mask uint32 - AvRate uint32 - Rtab [256]uint32 - Ptab [256]uint32 -} - -func NewFw(attrs FilterAttrs, fattrs FilterFwAttrs) (*Fw, error) { - var rtab [256]uint32 - var ptab [256]uint32 - rcellLog := -1 - pcellLog := -1 - avrate := fattrs.AvRate / 8 - police := nl.TcPolice{} - police.Rate.Rate = fattrs.Rate / 8 - police.PeakRate.Rate = fattrs.PeakRate / 8 - buffer := fattrs.Buffer - linklayer := nl.LINKLAYER_ETHERNET + DestIP net.IP + DestIPMask net.IPMask + SrcIP net.IP + SrcIPMask net.IPMask + EthType uint16 + EncDestIP net.IP + EncDestIPMask net.IPMask + EncSrcIP net.IP + EncSrcIPMask net.IPMask + EncDestPort uint16 + EncKeyId uint32 + + Actions []Action +} - if fattrs.LinkLayer != nl.LINKLAYER_UNSPEC { - linklayer = fattrs.LinkLayer - } +func (filter *Flower) Attrs() *FilterAttrs { + return &filter.FilterAttrs +} - police.Action = int32(fattrs.Action) - if police.Rate.Rate != 0 { - police.Rate.Mpu = fattrs.Mpu - police.Rate.Overhead = fattrs.Overhead - if CalcRtable(&police.Rate, rtab[:], rcellLog, fattrs.Mtu, linklayer) < 0 { - return nil, errors.New("TBF: failed to calculate rate table") - } - police.Burst = Xmittime(uint64(police.Rate.Rate), uint32(buffer)) +func (filter *Flower) Type() string { + return "flower" +} + +func (filter *Flower) encodeIP(parent *nl.RtAttr, ip net.IP, mask net.IPMask, v4Type, v6Type int, v4MaskType, v6MaskType int) { + ipType := v4Type + maskType := v4MaskType + + encodeMask := mask + if mask == nil { + encodeMask = net.CIDRMask(32, 32) } - police.Mtu = fattrs.Mtu - if police.PeakRate.Rate != 0 { - police.PeakRate.Mpu = fattrs.Mpu - police.PeakRate.Overhead = fattrs.Overhead - if CalcRtable(&police.PeakRate, ptab[:], pcellLog, fattrs.Mtu, linklayer) < 0 { - return nil, errors.New("POLICE: failed to calculate peak rate table") + v4IP := ip.To4() + if v4IP == nil { + ipType = v6Type + maskType = v6MaskType + if mask == nil { + encodeMask = net.CIDRMask(128, 128) } + } else { + ip = v4IP } - return &Fw{ - FilterAttrs: attrs, - ClassId: fattrs.ClassId, - InDev: fattrs.InDev, - Mask: fattrs.Mask, - Police: police, - AvRate: avrate, - Rtab: rtab, - Ptab: ptab, - }, nil + parent.AddRtAttr(ipType, ip) + parent.AddRtAttr(maskType, encodeMask) } -func (filter *Fw) Attrs() *FilterAttrs { - return &filter.FilterAttrs +func (filter *Flower) encode(parent *nl.RtAttr) error { + if filter.EthType != 0 { + parent.AddRtAttr(nl.TCA_FLOWER_KEY_ETH_TYPE, htons(filter.EthType)) + } + if filter.SrcIP != nil { + filter.encodeIP(parent, filter.SrcIP, filter.SrcIPMask, + nl.TCA_FLOWER_KEY_IPV4_SRC, nl.TCA_FLOWER_KEY_IPV6_SRC, + nl.TCA_FLOWER_KEY_IPV4_SRC_MASK, nl.TCA_FLOWER_KEY_IPV6_SRC_MASK) + } + if filter.DestIP != nil { + filter.encodeIP(parent, filter.DestIP, filter.DestIPMask, + nl.TCA_FLOWER_KEY_IPV4_DST, nl.TCA_FLOWER_KEY_IPV6_DST, + nl.TCA_FLOWER_KEY_IPV4_DST_MASK, nl.TCA_FLOWER_KEY_IPV6_DST_MASK) + } + if filter.EncSrcIP != nil { + filter.encodeIP(parent, filter.EncSrcIP, filter.EncSrcIPMask, + nl.TCA_FLOWER_KEY_ENC_IPV4_SRC, nl.TCA_FLOWER_KEY_ENC_IPV6_SRC, + nl.TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK, 
nl.TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK) + } + if filter.EncDestIP != nil { + filter.encodeIP(parent, filter.EncDestIP, filter.EncSrcIPMask, + nl.TCA_FLOWER_KEY_ENC_IPV4_DST, nl.TCA_FLOWER_KEY_ENC_IPV6_DST, + nl.TCA_FLOWER_KEY_ENC_IPV4_DST_MASK, nl.TCA_FLOWER_KEY_ENC_IPV6_DST_MASK) + } + if filter.EncDestPort != 0 { + parent.AddRtAttr(nl.TCA_FLOWER_KEY_ENC_UDP_DST_PORT, htons(filter.EncDestPort)) + } + if filter.EncKeyId != 0 { + parent.AddRtAttr(nl.TCA_FLOWER_KEY_ENC_KEY_ID, htonl(filter.EncKeyId)) + } + + actionsAttr := parent.AddRtAttr(nl.TCA_FLOWER_ACT, nil) + if err := EncodeActions(actionsAttr, filter.Actions); err != nil { + return err + } + return nil } -func (filter *Fw) Type() string { - return "fw" +func (filter *Flower) decode(data []syscall.NetlinkRouteAttr) error { + for _, datum := range data { + switch datum.Attr.Type { + case nl.TCA_FLOWER_KEY_ETH_TYPE: + filter.EthType = ntohs(datum.Value) + case nl.TCA_FLOWER_KEY_IPV4_SRC, nl.TCA_FLOWER_KEY_IPV6_SRC: + filter.SrcIP = datum.Value + case nl.TCA_FLOWER_KEY_IPV4_SRC_MASK, nl.TCA_FLOWER_KEY_IPV6_SRC_MASK: + filter.SrcIPMask = datum.Value + case nl.TCA_FLOWER_KEY_IPV4_DST, nl.TCA_FLOWER_KEY_IPV6_DST: + filter.DestIP = datum.Value + case nl.TCA_FLOWER_KEY_IPV4_DST_MASK, nl.TCA_FLOWER_KEY_IPV6_DST_MASK: + filter.DestIPMask = datum.Value + case nl.TCA_FLOWER_KEY_ENC_IPV4_SRC, nl.TCA_FLOWER_KEY_ENC_IPV6_SRC: + filter.EncSrcIP = datum.Value + case nl.TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK, nl.TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK: + filter.EncSrcIPMask = datum.Value + case nl.TCA_FLOWER_KEY_ENC_IPV4_DST, nl.TCA_FLOWER_KEY_ENC_IPV6_DST: + filter.EncDestIP = datum.Value + case nl.TCA_FLOWER_KEY_ENC_IPV4_DST_MASK, nl.TCA_FLOWER_KEY_ENC_IPV6_DST_MASK: + filter.EncDestIPMask = datum.Value + case nl.TCA_FLOWER_KEY_ENC_UDP_DST_PORT: + filter.EncDestPort = ntohs(datum.Value) + case nl.TCA_FLOWER_KEY_ENC_KEY_ID: + filter.EncKeyId = ntohl(datum.Value) + case nl.TCA_FLOWER_ACT: + tables, err := nl.ParseRouteAttr(datum.Value) + if err != nil { + return err + } + filter.Actions, err = parseActions(tables) + if err != nil { + return err + } + } + } + return nil } // FilterDel will delete a filter from the system. 
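The filter changes above replace the old Fw type (embedded nl.TcPolice plus precomputed Rtab/Ptab) with FwFilter carrying an optional *PoliceAction, and introduce the Flower classifier with its own encode/decode. A minimal sketch of attaching a policed fw filter through the new types, assuming a hypothetical interface eth0 with an ingress qdisc (ffff:) already installed and packets marked with fwmark 6 by an iptables MARK rule:

package main

import (
	"log"

	"github.com/vishvananda/netlink"
	"golang.org/x/sys/unix"
)

func main() {
	link, err := netlink.LinkByName("eth0") // hypothetical interface
	if err != nil {
		log.Fatal(err)
	}

	// Police to ~1 MB/s with a 64 KB burst; rate tables are now built
	// inside encodePolice instead of NewFw.
	police := netlink.NewPoliceAction()
	police.Rate = 1 << 20  // bytes per second
	police.Burst = 64 << 10 // bytes
	police.Mtu = 1500

	fw := &netlink.FwFilter{
		FilterAttrs: netlink.FilterAttrs{
			LinkIndex: link.Attrs().Index,
			Parent:    netlink.MakeHandle(0xffff, 0), // ingress qdisc
			Handle:    6,                             // fwmark to match
			Priority:  1,
			Protocol:  unix.ETH_P_ALL,
		},
		Mask:   0xffffffff,
		Police: police,
	}
	if err := netlink.FilterAdd(fw); err != nil {
		log.Fatalf("filter add: %v", err)
	}
}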
@@ -168,7 +225,6 @@ func (h *Handle) FilterReplace(filter Filter) error { } func (h *Handle) filterModify(filter Filter, flags int) error { - native = nl.NativeEndian() req := h.newNetlinkRequest(unix.RTM_NEWTFILTER, flags|unix.NLM_F_ACK) base := filter.Attrs() msg := &nl.TcMsg{ @@ -225,6 +281,9 @@ func (h *Handle) filterModify(filter Filter, flags int) error { if filter.Hash != 0 { options.AddRtAttr(nl.TCA_U32_HASH, nl.Uint32Attr(filter.Hash)) } + if filter.Link != 0 { + options.AddRtAttr(nl.TCA_U32_LINK, nl.Uint32Attr(filter.Link)) + } actionsAttr := options.AddRtAttr(nl.TCA_U32_ACT, nil) // backwards compatibility if filter.RedirIndex != 0 { @@ -233,7 +292,7 @@ func (h *Handle) filterModify(filter Filter, flags int) error { if err := EncodeActions(actionsAttr, filter.Actions); err != nil { return err } - case *Fw: + case *FwFilter: if filter.Mask != 0 { b := make([]byte, 4) native.PutUint32(b, filter.Mask) @@ -242,17 +301,10 @@ func (h *Handle) filterModify(filter Filter, flags int) error { if filter.InDev != "" { options.AddRtAttr(nl.TCA_FW_INDEV, nl.ZeroTerminated(filter.InDev)) } - if (filter.Police != nl.TcPolice{}) { - + if filter.Police != nil { police := options.AddRtAttr(nl.TCA_FW_POLICE, nil) - police.AddRtAttr(nl.TCA_POLICE_TBF, filter.Police.Serialize()) - if (filter.Police.Rate != nl.TcRateSpec{}) { - payload := SerializeRtab(filter.Rtab) - police.AddRtAttr(nl.TCA_POLICE_RATE, payload) - } - if (filter.Police.PeakRate != nl.TcRateSpec{}) { - payload := SerializeRtab(filter.Ptab) - police.AddRtAttr(nl.TCA_POLICE_PEAKRATE, payload) + if err := encodePolice(police, filter.Police); err != nil { + return err } } if filter.ClassId != 0 { @@ -283,6 +335,10 @@ func (h *Handle) filterModify(filter Filter, flags int) error { if filter.ClassId != 0 { options.AddRtAttr(nl.TCA_MATCHALL_CLASSID, nl.Uint32Attr(filter.ClassId)) } + case *Flower: + if err := filter.encode(options); err != nil { + return err + } } req.AddData(options) @@ -346,11 +402,13 @@ func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) { case "u32": filter = &U32{} case "fw": - filter = &Fw{} + filter = &FwFilter{} case "bpf": filter = &BpfFilter{} case "matchall": filter = &MatchAll{} + case "flower": + filter = &Flower{} default: filter = &GenericFilter{FilterType: filterType} } @@ -380,6 +438,11 @@ func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) { if err != nil { return nil, err } + case "flower": + detailed, err = parseFlowerData(filter, data) + if err != nil { + return nil, err + } default: detailed = true } @@ -411,6 +474,53 @@ func toAttrs(tcgen *nl.TcGen, attrs *ActionAttrs) { attrs.Bindcnt = int(tcgen.Bindcnt) } +func encodePolice(attr *nl.RtAttr, action *PoliceAction) error { + var rtab [256]uint32 + var ptab [256]uint32 + police := nl.TcPolice{} + police.Index = uint32(action.Attrs().Index) + police.Bindcnt = int32(action.Attrs().Bindcnt) + police.Capab = uint32(action.Attrs().Capab) + police.Refcnt = int32(action.Attrs().Refcnt) + police.Rate.Rate = action.Rate + police.PeakRate.Rate = action.PeakRate + police.Action = int32(action.ExceedAction) + + if police.Rate.Rate != 0 { + police.Rate.Mpu = action.Mpu + police.Rate.Overhead = action.Overhead + if CalcRtable(&police.Rate, rtab[:], action.RCellLog, action.Mtu, action.LinkLayer) < 0 { + return errors.New("TBF: failed to calculate rate table") + } + police.Burst = Xmittime(uint64(police.Rate.Rate), action.Burst) + } + + police.Mtu = action.Mtu + if police.PeakRate.Rate != 0 { + police.PeakRate.Mpu = action.Mpu + 
police.PeakRate.Overhead = action.Overhead + if CalcRtable(&police.PeakRate, ptab[:], action.PCellLog, action.Mtu, action.LinkLayer) < 0 { + return errors.New("POLICE: failed to calculate peak rate table") + } + } + + attr.AddRtAttr(nl.TCA_POLICE_TBF, police.Serialize()) + if police.Rate.Rate != 0 { + attr.AddRtAttr(nl.TCA_POLICE_RATE, SerializeRtab(rtab)) + } + if police.PeakRate.Rate != 0 { + attr.AddRtAttr(nl.TCA_POLICE_PEAKRATE, SerializeRtab(ptab)) + } + if action.AvRate != 0 { + attr.AddRtAttr(nl.TCA_POLICE_AVRATE, nl.Uint32Attr(action.AvRate)) + } + if action.NotExceedAction != 0 { + attr.AddRtAttr(nl.TCA_POLICE_RESULT, nl.Uint32Attr(uint32(action.NotExceedAction))) + } + + return nil +} + func EncodeActions(attr *nl.RtAttr, actions []Action) error { tabIndex := int(nl.TCA_ACT_TAB) @@ -418,6 +528,14 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error { switch action := action.(type) { default: return fmt.Errorf("unknown action type %s", action.Type()) + case *PoliceAction: + table := attr.AddRtAttr(tabIndex, nil) + tabIndex++ + table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("police")) + aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil) + if err := encodePolice(aopts, action); err != nil { + return err + } case *MirredAction: table := attr.AddRtAttr(tabIndex, nil) tabIndex++ @@ -489,6 +607,16 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error { } toTcGen(action.Attrs(), &connmark.TcGen) aopts.AddRtAttr(nl.TCA_CONNMARK_PARMS, connmark.Serialize()) + case *CsumAction: + table := attr.AddRtAttr(tabIndex, nil) + tabIndex++ + table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("csum")) + aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil) + csum := nl.TcCsum{ + UpdateFlags: uint32(action.UpdateFlags), + } + toTcGen(action.Attrs(), &csum.TcGen) + aopts.AddRtAttr(nl.TCA_CSUM_PARMS, csum.Serialize()) case *BpfAction: table := attr.AddRtAttr(tabIndex, nil) tabIndex++ @@ -512,6 +640,29 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error { return nil } +func parsePolice(data syscall.NetlinkRouteAttr, police *PoliceAction) { + switch data.Attr.Type { + case nl.TCA_POLICE_RESULT: + police.NotExceedAction = TcPolAct(native.Uint32(data.Value[0:4])) + case nl.TCA_POLICE_AVRATE: + police.AvRate = native.Uint32(data.Value[0:4]) + case nl.TCA_POLICE_TBF: + p := *nl.DeserializeTcPolice(data.Value) + police.ActionAttrs = ActionAttrs{} + police.Attrs().Index = int(p.Index) + police.Attrs().Bindcnt = int(p.Bindcnt) + police.Attrs().Capab = int(p.Capab) + police.Attrs().Refcnt = int(p.Refcnt) + police.ExceedAction = TcPolAct(p.Action) + police.Rate = p.Rate.Rate + police.PeakRate = p.PeakRate.Rate + police.Burst = Xmitsize(uint64(p.Rate.Rate), p.Burst) + police.Mtu = p.Mtu + police.LinkLayer = int(p.Rate.Linklayer) & nl.TC_LINKLAYER_MASK + police.Overhead = p.Rate.Overhead + } +} + func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { var actions []Action for _, table := range tables { @@ -534,12 +685,16 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { action = &BpfAction{} case "connmark": action = &ConnmarkAction{} + case "csum": + action = &CsumAction{} case "gact": action = &GenericAction{} case "tunnel_key": action = &TunnelKeyAction{} case "skbedit": action = &SkbEditAction{} + case "police": + action = &PoliceAction{} default: break nextattr } @@ -612,12 +767,22 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { toAttrs(&connmark.TcGen, action.Attrs()) action.(*ConnmarkAction).Zone = 
connmark.Zone } + case "csum": + switch adatum.Attr.Type { + case nl.TCA_CSUM_PARMS: + csum := *nl.DeserializeTcCsum(adatum.Value) + action.(*CsumAction).ActionAttrs = ActionAttrs{} + toAttrs(&csum.TcGen, action.Attrs()) + action.(*CsumAction).UpdateFlags = CsumUpdateFlags(csum.UpdateFlags) + } case "gact": switch adatum.Attr.Type { case nl.TCA_GACT_PARMS: gen := *nl.DeserializeTcGen(adatum.Value) toAttrs(&gen, action.Attrs()) } + case "police": + parsePolice(adatum, action.(*PoliceAction)) } } } @@ -628,7 +793,6 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { } func parseU32Data(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) { - native = nl.NativeEndian() u32 := filter.(*U32) detailed := false for _, datum := range data { @@ -666,14 +830,15 @@ func parseU32Data(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) u32.Divisor = native.Uint32(datum.Value) case nl.TCA_U32_HASH: u32.Hash = native.Uint32(datum.Value) + case nl.TCA_U32_LINK: + u32.Link = native.Uint32(datum.Value) } } return detailed, nil } func parseFwData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) { - native = nl.NativeEndian() - fw := filter.(*Fw) + fw := filter.(*FwFilter) detailed := true for _, datum := range data { switch datum.Attr.Type { @@ -684,24 +849,18 @@ func parseFwData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) { case nl.TCA_FW_INDEV: fw.InDev = string(datum.Value[:len(datum.Value)-1]) case nl.TCA_FW_POLICE: + var police PoliceAction adata, _ := nl.ParseRouteAttr(datum.Value) for _, aattr := range adata { - switch aattr.Attr.Type { - case nl.TCA_POLICE_TBF: - fw.Police = *nl.DeserializeTcPolice(aattr.Value) - case nl.TCA_POLICE_RATE: - fw.Rtab = DeserializeRtab(aattr.Value) - case nl.TCA_POLICE_PEAKRATE: - fw.Ptab = DeserializeRtab(aattr.Value) - } + parsePolice(aattr, &police) } + fw.Police = &police } } return detailed, nil } func parseBpfData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) { - native = nl.NativeEndian() bpf := filter.(*BpfFilter) detailed := true for _, datum := range data { @@ -720,14 +879,13 @@ func parseBpfData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) case nl.TCA_BPF_ID: bpf.Id = int(native.Uint32(datum.Value[0:4])) case nl.TCA_BPF_TAG: - bpf.Tag = hex.EncodeToString(datum.Value[:len(datum.Value)-1]) + bpf.Tag = hex.EncodeToString(datum.Value) } } return detailed, nil } func parseMatchAllData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) { - native = nl.NativeEndian() matchall := filter.(*MatchAll) detailed := true for _, datum := range data { @@ -748,6 +906,10 @@ func parseMatchAllData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, er return detailed, nil } +func parseFlowerData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) { + return true, filter.(*Flower).decode(data) +} + func AlignToAtm(size uint) uint { var linksize, cells int cells = int(size / nl.ATM_CELL_PAYLOAD) @@ -795,14 +957,12 @@ func CalcRtable(rate *nl.TcRateSpec, rtab []uint32, cellLog int, mtu uint32, lin func DeserializeRtab(b []byte) [256]uint32 { var rtab [256]uint32 - native := nl.NativeEndian() r := bytes.NewReader(b) _ = binary.Read(r, native, &rtab) return rtab } func SerializeRtab(rtab [256]uint32) []byte { - native := nl.NativeEndian() var w bytes.Buffer _ = binary.Write(&w, native, rtab) return w.Bytes() diff --git a/agent/vendor/github.com/vishvananda/netlink/handle_linux.go b/agent/vendor/github.com/vishvananda/netlink/handle_linux.go index 
26887b75961..4cb01168781 100644 --- a/agent/vendor/github.com/vishvananda/netlink/handle_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/handle_linux.go @@ -15,12 +15,28 @@ var pkgHandle = &Handle{} // Handle is an handle for the netlink requests on a // specific network namespace. All the requests on the // same netlink family share the same netlink socket, -// which gets released when the handle is deleted. +// which gets released when the handle is Close'd. type Handle struct { sockets map[int]*nl.SocketHandle lookupByDump bool } +// SetSocketTimeout configures timeout for default netlink sockets +func SetSocketTimeout(to time.Duration) error { + if to < time.Microsecond { + return fmt.Errorf("invalid timeout, minimul value is %s", time.Microsecond) + } + + nl.SocketTimeoutTv = unix.NsecToTimeval(to.Nanoseconds()) + return nil +} + +// GetSocketTimeout returns the timeout value used by default netlink sockets +func GetSocketTimeout() time.Duration { + nsec := unix.TimevalToNsec(nl.SocketTimeoutTv) + return time.Duration(nsec) * time.Nanosecond +} + // SupportsNetlinkFamily reports whether the passed netlink family is supported by this Handle func (h *Handle) SupportsNetlinkFamily(nlFamily int) bool { _, ok := h.sockets[nlFamily] @@ -91,6 +107,21 @@ func (h *Handle) GetSocketReceiveBufferSize() ([]int, error) { return results, nil } +// SetStrictCheck sets the strict check socket option for each socket in the netlink handle. Returns early if any set operation fails +func (h *Handle) SetStrictCheck(state bool) error { + for _, sh := range h.sockets { + var stateInt int = 0 + if state { + stateInt = 1 + } + err := unix.SetsockoptInt(sh.Socket.GetFd(), unix.SOL_NETLINK, unix.NETLINK_GET_STRICT_CHK, stateInt) + if err != nil { + return err + } + } + return nil +} + // NewHandleAt returns a netlink handle on the network namespace // specified by ns. If ns=netns.None(), current network namespace // will be assumed @@ -120,14 +151,22 @@ func newHandle(newNs, curNs netns.NsHandle, nlFamilies ...int) (*Handle, error) return h, nil } -// Delete releases the resources allocated to this handle -func (h *Handle) Delete() { +// Close releases the resources allocated to this handle +func (h *Handle) Close() { for _, sh := range h.sockets { sh.Close() } h.sockets = nil } +// Delete releases the resources allocated to this handle +// +// Deprecated: use Close instead which is in line with typical resource release +// patterns for files and other resources. 
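SetSocketTimeout/GetSocketTimeout, the per-handle SetStrictCheck toggle and the Delete-to-Close rename above change how handles are created and torn down. A short sketch of the intended call pattern, assuming a handle that owns a NETLINK_ROUTE socket so SetStrictCheck has something to apply to:

package main

import (
	"log"
	"time"

	"github.com/vishvananda/netlink"
	"golang.org/x/sys/unix"
)

func main() {
	// Default timeout for netlink sockets created from here on.
	if err := netlink.SetSocketTimeout(3 * time.Second); err != nil {
		log.Fatal(err)
	}

	h, err := netlink.NewHandle(unix.NETLINK_ROUTE)
	if err != nil {
		log.Fatal(err)
	}
	defer h.Close() // Delete is kept only as a deprecated alias

	// Ask the kernel to validate request headers strictly (NETLINK_GET_STRICT_CHK).
	if err := h.SetStrictCheck(true); err != nil {
		log.Printf("strict checking unavailable: %v", err)
	}

	links, err := h.LinkList()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("found %d links", len(links))
}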
+func (h *Handle) Delete() { + h.Close() +} + func (h *Handle) newNetlinkRequest(proto, flags int) *nl.NetlinkRequest { // Do this so that package API still use nl package variable nextSeqNr if h.sockets == nil { diff --git a/agent/vendor/github.com/vishvananda/netlink/handle_unspecified.go b/agent/vendor/github.com/vishvananda/netlink/handle_unspecified.go index df341f706ea..cc94a4e007e 100644 --- a/agent/vendor/github.com/vishvananda/netlink/handle_unspecified.go +++ b/agent/vendor/github.com/vishvananda/netlink/handle_unspecified.go @@ -23,6 +23,8 @@ func NewHandleAtFrom(newNs, curNs netns.NsHandle) (*Handle, error) { return nil, ErrNotImplemented } +func (h *Handle) Close() {} + func (h *Handle) Delete() {} func (h *Handle) SupportsNetlinkFamily(nlFamily int) bool { @@ -237,6 +239,10 @@ func (h *Handle) RouteAdd(route *Route) error { return ErrNotImplemented } +func (h *Handle) RouteAppend(route *Route) error { + return ErrNotImplemented +} + func (h *Handle) RouteDel(route *Route) error { return ErrNotImplemented } diff --git a/agent/vendor/github.com/vishvananda/netlink/inet_diag.go b/agent/vendor/github.com/vishvananda/netlink/inet_diag.go index 72c1fcb59c4..bee391a8098 100644 --- a/agent/vendor/github.com/vishvananda/netlink/inet_diag.go +++ b/agent/vendor/github.com/vishvananda/netlink/inet_diag.go @@ -27,4 +27,5 @@ const ( type InetDiagTCPInfoResp struct { InetDiagMsg *Socket TCPInfo *TCPInfo + TCPBBRInfo *TCPBBRInfo } diff --git a/agent/vendor/github.com/vishvananda/netlink/ipset_linux.go b/agent/vendor/github.com/vishvananda/netlink/ipset_linux.go index 5487fc1cce3..1f4eae81c24 100644 --- a/agent/vendor/github.com/vishvananda/netlink/ipset_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/ipset_linux.go @@ -1,6 +1,7 @@ package netlink import ( + "encoding/binary" "log" "net" "syscall" @@ -11,25 +12,40 @@ import ( // IPSetEntry is used for adding, updating, retreiving and deleting entries type IPSetEntry struct { - Comment string - MAC net.HardwareAddr - IP net.IP - Timeout *uint32 - Packets *uint64 - Bytes *uint64 + Comment string + MAC net.HardwareAddr + IP net.IP + CIDR uint8 + Timeout *uint32 + Packets *uint64 + Bytes *uint64 + Protocol *uint8 + Port *uint16 + IP2 net.IP + CIDR2 uint8 + IFace string + Mark *uint32 Replace bool // replace existing entry } // IPSetResult is the result of a dump request for a set type IPSetResult struct { - Nfgenmsg *nl.Nfgenmsg - Protocol uint8 - Revision uint8 - Family uint8 - Flags uint8 - SetName string - TypeName string + Nfgenmsg *nl.Nfgenmsg + Protocol uint8 + ProtocolMinVersion uint8 + Revision uint8 + Family uint8 + Flags uint8 + SetName string + TypeName string + Comment string + MarkMask uint32 + + IPFrom net.IP + IPTo net.IP + PortFrom uint16 + PortTo uint16 HashSize uint32 NumEntries uint32 @@ -38,6 +54,7 @@ type IPSetResult struct { SizeInMemory uint32 CadtFlags uint32 Timeout *uint32 + LineNo uint32 Entries []IPSetEntry } @@ -49,10 +66,16 @@ type IpsetCreateOptions struct { Counters bool Comments bool Skbinfo bool + + Revision uint8 + IPFrom net.IP + IPTo net.IP + PortFrom uint16 + PortTo uint16 } // IpsetProtocol returns the ipset protocol version from the kernel -func IpsetProtocol() (uint8, error) { +func IpsetProtocol() (uint8, uint8, error) { return pkgHandle.IpsetProtocol() } @@ -83,23 +106,23 @@ func IpsetListAll() ([]IPSetResult, error) { // IpsetAdd adds an entry to an existing ipset. 
func IpsetAdd(setname string, entry *IPSetEntry) error { - return pkgHandle.ipsetAddDel(nl.IPSET_CMD_ADD, setname, entry) + return pkgHandle.IpsetAdd(setname, entry) } -// IpsetDele deletes an entry from an existing ipset. +// IpsetDel deletes an entry from an existing ipset. func IpsetDel(setname string, entry *IPSetEntry) error { - return pkgHandle.ipsetAddDel(nl.IPSET_CMD_DEL, setname, entry) + return pkgHandle.IpsetDel(setname, entry) } -func (h *Handle) IpsetProtocol() (uint8, error) { +func (h *Handle) IpsetProtocol() (protocol uint8, minVersion uint8, err error) { req := h.newIpsetRequest(nl.IPSET_CMD_PROTOCOL) msgs, err := req.Execute(unix.NETLINK_NETFILTER, 0) if err != nil { - return 0, err + return 0, 0, err } - - return ipsetUnserialize(msgs).Protocol, nil + response := ipsetUnserialize(msgs) + return response.Protocol, response.ProtocolMinVersion, nil } func (h *Handle) IpsetCreate(setname, typename string, options IpsetCreateOptions) error { @@ -111,11 +134,30 @@ func (h *Handle) IpsetCreate(setname, typename string, options IpsetCreateOption req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_SETNAME, nl.ZeroTerminated(setname))) req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_TYPENAME, nl.ZeroTerminated(typename))) - req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_REVISION, nl.Uint8Attr(0))) - req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_FAMILY, nl.Uint8Attr(0))) + + revision := options.Revision + if revision == 0 { + revision = getIpsetDefaultWithTypeName(typename) + } + req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_REVISION, nl.Uint8Attr(revision))) data := nl.NewRtAttr(nl.IPSET_ATTR_DATA|int(nl.NLA_F_NESTED), nil) + var family uint8 + switch typename { + case "hash:mac": + case "bitmap:port": + buf := make([]byte, 4) + binary.BigEndian.PutUint16(buf, options.PortFrom) + binary.BigEndian.PutUint16(buf[2:], options.PortTo) + data.AddChild(nl.NewRtAttr(nl.IPSET_ATTR_PORT_FROM|int(nl.NLA_F_NET_BYTEORDER), buf[:2])) + data.AddChild(nl.NewRtAttr(nl.IPSET_ATTR_PORT_TO|int(nl.NLA_F_NET_BYTEORDER), buf[2:])) + default: + family = unix.AF_INET + } + + req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_FAMILY, nl.Uint8Attr(family))) + if timeout := options.Timeout; timeout != nil { data.AddChild(&nl.Uint32Attribute{Type: nl.IPSET_ATTR_TIMEOUT | nl.NLA_F_NET_BYTEORDER, Value: *timeout}) } @@ -184,9 +226,24 @@ func (h *Handle) IpsetListAll() ([]IPSetResult, error) { return result, nil } +// IpsetAdd adds an entry to an existing ipset. +func (h *Handle) IpsetAdd(setname string, entry *IPSetEntry) error { + return h.ipsetAddDel(nl.IPSET_CMD_ADD, setname, entry) +} + +// IpsetDel deletes an entry from an existing ipset. 
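The ipset changes above widen IPSetEntry (IP/CIDR, ports, protocol, interface, mark), split IpsetProtocol into (protocol, minVersion) and add IpsetAdd/IpsetDel methods on Handle. A minimal sketch of creating a hash:ip,port set and adding one entry through the package-level wrappers; the set name and address are hypothetical:

package main

import (
	"log"
	"net"

	"github.com/vishvananda/netlink"
	"golang.org/x/sys/unix"
)

func main() {
	timeout := uint32(300) // seconds
	// Revision 0 lets getIpsetDefaultWithTypeName pick revision 1 for hash:ip,port.
	err := netlink.IpsetCreate("allowed-backends", "hash:ip,port", netlink.IpsetCreateOptions{
		Timeout:  &timeout,
		Counters: true,
		Comments: true,
	})
	if err != nil {
		log.Fatalf("ipset create: %v", err)
	}

	port := uint16(8080)
	proto := uint8(unix.IPPROTO_TCP) // TCP is also the default when only Port is set
	entry := &netlink.IPSetEntry{
		IP:       net.ParseIP("10.0.0.5").To4(),
		Port:     &port,
		Protocol: &proto,
		Comment:  "app backend",
		Replace:  true,
	}
	if err := netlink.IpsetAdd("allowed-backends", entry); err != nil {
		log.Fatalf("ipset add: %v", err)
	}
}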
+func (h *Handle) IpsetDel(setname string, entry *IPSetEntry) error { + return h.ipsetAddDel(nl.IPSET_CMD_DEL, setname, entry) +} + func (h *Handle) ipsetAddDel(nlCmd int, setname string, entry *IPSetEntry) error { req := h.newIpsetRequest(nlCmd) req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_SETNAME, nl.ZeroTerminated(setname))) + + if entry.Comment != "" { + req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_COMMENT, nl.ZeroTerminated(entry.Comment))) + } + data := nl.NewRtAttr(nl.IPSET_ATTR_DATA|int(nl.NLA_F_NESTED), nil) if !entry.Replace { @@ -196,10 +253,49 @@ func (h *Handle) ipsetAddDel(nlCmd int, setname string, entry *IPSetEntry) error if entry.Timeout != nil { data.AddChild(&nl.Uint32Attribute{Type: nl.IPSET_ATTR_TIMEOUT | nl.NLA_F_NET_BYTEORDER, Value: *entry.Timeout}) } + + if entry.IP != nil { + nestedData := nl.NewRtAttr(nl.IPSET_ATTR_IP|int(nl.NLA_F_NET_BYTEORDER), entry.IP) + data.AddChild(nl.NewRtAttr(nl.IPSET_ATTR_IP|int(nl.NLA_F_NESTED), nestedData.Serialize())) + } + if entry.MAC != nil { data.AddChild(nl.NewRtAttr(nl.IPSET_ATTR_ETHER, entry.MAC)) } + if entry.CIDR != 0 { + data.AddChild(nl.NewRtAttr(nl.IPSET_ATTR_CIDR, nl.Uint8Attr(entry.CIDR))) + } + + if entry.IP2 != nil { + nestedData := nl.NewRtAttr(nl.IPSET_ATTR_IP|int(nl.NLA_F_NET_BYTEORDER), entry.IP2) + data.AddChild(nl.NewRtAttr(nl.IPSET_ATTR_IP2|int(nl.NLA_F_NESTED), nestedData.Serialize())) + } + + if entry.CIDR2 != 0 { + data.AddChild(nl.NewRtAttr(nl.IPSET_ATTR_CIDR2, nl.Uint8Attr(entry.CIDR2))) + } + + if entry.Port != nil { + if entry.Protocol == nil { + // use tcp protocol as default + val := uint8(unix.IPPROTO_TCP) + entry.Protocol = &val + } + data.AddChild(nl.NewRtAttr(nl.IPSET_ATTR_PROTO, nl.Uint8Attr(*entry.Protocol))) + buf := make([]byte, 2) + binary.BigEndian.PutUint16(buf, *entry.Port) + data.AddChild(nl.NewRtAttr(int(nl.IPSET_ATTR_PORT|nl.NLA_F_NET_BYTEORDER), buf)) + } + + if entry.IFace != "" { + data.AddChild(nl.NewRtAttr(nl.IPSET_ATTR_IFACE, nl.ZeroTerminated(entry.IFace))) + } + + if entry.Mark != nil { + data.AddChild(&nl.Uint32Attribute{Type: nl.IPSET_ATTR_MARK | nl.NLA_F_NET_BYTEORDER, Value: *entry.Mark}) + } + data.AddChild(&nl.Uint32Attribute{Type: nl.IPSET_ATTR_LINENO | nl.NLA_F_NET_BYTEORDER, Value: 0}) req.AddData(data) @@ -222,6 +318,17 @@ func (h *Handle) newIpsetRequest(cmd int) *nl.NetlinkRequest { return req } +func getIpsetDefaultWithTypeName(typename string) uint8 { + switch typename { + case "hash:ip,port", + "hash:ip,port,ip", + "hash:ip,port,net", + "hash:net,port": + return 1 + } + return 0 +} + func ipsetExecute(req *nl.NetlinkRequest) (msgs [][]byte, err error) { msgs, err = req.Execute(unix.NETLINK_NETFILTER, 0) @@ -249,6 +356,8 @@ func (result *IPSetResult) unserialize(msg []byte) { result.Protocol = attr.Value[0] case nl.IPSET_ATTR_SETNAME: result.SetName = nl.BytesToString(attr.Value) + case nl.IPSET_ATTR_COMMENT: + result.Comment = nl.BytesToString(attr.Value) case nl.IPSET_ATTR_TYPENAME: result.TypeName = nl.BytesToString(attr.Value) case nl.IPSET_ATTR_REVISION: @@ -261,6 +370,10 @@ func (result *IPSetResult) unserialize(msg []byte) { result.parseAttrData(attr.Value) case nl.IPSET_ATTR_ADT | nl.NLA_F_NESTED: result.parseAttrADT(attr.Value) + case nl.IPSET_ATTR_PROTOCOL_MIN: + result.ProtocolMinVersion = attr.Value[0] + case nl.IPSET_ATTR_MARKMASK: + result.MarkMask = attr.Uint32() default: log.Printf("unknown ipset attribute from kernel: %+v %v", attr, attr.Type&nl.NLA_TYPE_MASK) } @@ -285,6 +398,36 @@ func (result *IPSetResult) parseAttrData(data []byte) { result.SizeInMemory = 
attr.Uint32() case nl.IPSET_ATTR_CADT_FLAGS | nl.NLA_F_NET_BYTEORDER: result.CadtFlags = attr.Uint32() + case nl.IPSET_ATTR_IP | nl.NLA_F_NESTED: + for nested := range nl.ParseAttributes(attr.Value) { + switch nested.Type { + case nl.IPSET_ATTR_IP | nl.NLA_F_NET_BYTEORDER: + result.Entries = append(result.Entries, IPSetEntry{IP: nested.Value}) + case nl.IPSET_ATTR_IP: + result.IPFrom = nested.Value + default: + log.Printf("unknown nested ipset data attribute from kernel: %+v %v", nested, nested.Type&nl.NLA_TYPE_MASK) + } + } + case nl.IPSET_ATTR_IP_TO | nl.NLA_F_NESTED: + for nested := range nl.ParseAttributes(attr.Value) { + switch nested.Type { + case nl.IPSET_ATTR_IP: + result.IPTo = nested.Value + default: + log.Printf("unknown nested ipset data attribute from kernel: %+v %v", nested, nested.Type&nl.NLA_TYPE_MASK) + } + } + case nl.IPSET_ATTR_PORT_FROM | nl.NLA_F_NET_BYTEORDER: + result.PortFrom = networkOrder.Uint16(attr.Value) + case nl.IPSET_ATTR_PORT_TO | nl.NLA_F_NET_BYTEORDER: + result.PortTo = networkOrder.Uint16(attr.Value) + case nl.IPSET_ATTR_CADT_LINENO | nl.NLA_F_NET_BYTEORDER: + result.LineNo = attr.Uint32() + case nl.IPSET_ATTR_COMMENT: + result.Comment = nl.BytesToString(attr.Value) + case nl.IPSET_ATTR_MARKMASK: + result.MarkMask = attr.Uint32() default: log.Printf("unknown ipset data attribute from kernel: %+v %v", attr, attr.Type&nl.NLA_TYPE_MASK) } @@ -316,6 +459,8 @@ func parseIPSetEntry(data []byte) (entry IPSetEntry) { entry.Packets = &val case nl.IPSET_ATTR_ETHER: entry.MAC = net.HardwareAddr(attr.Value) + case nl.IPSET_ATTR_IP: + entry.IP = net.IP(attr.Value) case nl.IPSET_ATTR_COMMENT: entry.Comment = nl.BytesToString(attr.Value) case nl.IPSET_ATTR_IP | nl.NLA_F_NESTED: @@ -327,6 +472,30 @@ func parseIPSetEntry(data []byte) (entry IPSetEntry) { log.Printf("unknown nested ADT attribute from kernel: %+v", attr) } } + case nl.IPSET_ATTR_IP2 | nl.NLA_F_NESTED: + for attr := range nl.ParseAttributes(attr.Value) { + switch attr.Type { + case nl.IPSET_ATTR_IP: + entry.IP2 = net.IP(attr.Value) + default: + log.Printf("unknown nested ADT attribute from kernel: %+v", attr) + } + } + case nl.IPSET_ATTR_CIDR: + entry.CIDR = attr.Value[0] + case nl.IPSET_ATTR_CIDR2: + entry.CIDR2 = attr.Value[0] + case nl.IPSET_ATTR_PORT | nl.NLA_F_NET_BYTEORDER: + val := networkOrder.Uint16(attr.Value) + entry.Port = &val + case nl.IPSET_ATTR_PROTO: + val := attr.Value[0] + entry.Protocol = &val + case nl.IPSET_ATTR_IFACE: + entry.IFace = nl.BytesToString(attr.Value) + case nl.IPSET_ATTR_MARK | nl.NLA_F_NET_BYTEORDER: + val := attr.Uint32() + entry.Mark = &val default: log.Printf("unknown ADT attribute from kernel: %+v", attr) } diff --git a/agent/vendor/github.com/vishvananda/netlink/link.go b/agent/vendor/github.com/vishvananda/netlink/link.go index e2441bd71f8..33c872336d6 100644 --- a/agent/vendor/github.com/vishvananda/netlink/link.go +++ b/agent/vendor/github.com/vishvananda/netlink/link.go @@ -35,10 +35,13 @@ type LinkAttrs struct { Alias string Statistics *LinkStatistics Promisc int + Allmulti int + Multi int Xdp *LinkXdp EncapType string Protinfo *Protinfo OperState LinkOperState + PhysSwitchID int NetNsID int NumTxQueues int NumRxQueues int @@ -456,6 +459,19 @@ func (ipvlan *IPVlan) Type() string { return "ipvlan" } +// IPVtap - IPVtap is a virtual interfaces based on ipvlan +type IPVtap struct { + IPVlan +} + +func (ipvtap *IPVtap) Attrs() *LinkAttrs { + return &ipvtap.LinkAttrs +} + +func (ipvtap IPVtap) Type() string { + return "ipvtap" +} + // VlanProtocol type type 
VlanProtocol int @@ -555,6 +571,27 @@ const ( BOND_ARP_VALIDATE_ALL ) +var bondArpValidateToString = map[BondArpValidate]string{ + BOND_ARP_VALIDATE_NONE: "none", + BOND_ARP_VALIDATE_ACTIVE: "active", + BOND_ARP_VALIDATE_BACKUP: "backup", + BOND_ARP_VALIDATE_ALL: "none", +} +var StringToBondArpValidateMap = map[string]BondArpValidate{ + "none": BOND_ARP_VALIDATE_NONE, + "active": BOND_ARP_VALIDATE_ACTIVE, + "backup": BOND_ARP_VALIDATE_BACKUP, + "all": BOND_ARP_VALIDATE_ALL, +} + +func (b BondArpValidate) String() string { + s, ok := bondArpValidateToString[b] + if !ok { + return fmt.Sprintf("BondArpValidate(%d)", b) + } + return s +} + // BondPrimaryReselect type type BondPrimaryReselect int @@ -565,6 +602,25 @@ const ( BOND_PRIMARY_RESELECT_FAILURE ) +var bondPrimaryReselectToString = map[BondPrimaryReselect]string{ + BOND_PRIMARY_RESELECT_ALWAYS: "always", + BOND_PRIMARY_RESELECT_BETTER: "better", + BOND_PRIMARY_RESELECT_FAILURE: "failure", +} +var StringToBondPrimaryReselectMap = map[string]BondPrimaryReselect{ + "always": BOND_PRIMARY_RESELECT_ALWAYS, + "better": BOND_PRIMARY_RESELECT_BETTER, + "failure": BOND_PRIMARY_RESELECT_FAILURE, +} + +func (b BondPrimaryReselect) String() string { + s, ok := bondPrimaryReselectToString[b] + if !ok { + return fmt.Sprintf("BondPrimaryReselect(%d)", b) + } + return s +} + // BondArpAllTargets type type BondArpAllTargets int @@ -574,6 +630,23 @@ const ( BOND_ARP_ALL_TARGETS_ALL ) +var bondArpAllTargetsToString = map[BondArpAllTargets]string{ + BOND_ARP_ALL_TARGETS_ANY: "any", + BOND_ARP_ALL_TARGETS_ALL: "all", +} +var StringToBondArpAllTargetsMap = map[string]BondArpAllTargets{ + "any": BOND_ARP_ALL_TARGETS_ANY, + "all": BOND_ARP_ALL_TARGETS_ALL, +} + +func (b BondArpAllTargets) String() string { + s, ok := bondArpAllTargetsToString[b] + if !ok { + return fmt.Sprintf("BondArpAllTargets(%d)", b) + } + return s +} + // BondFailOverMac type type BondFailOverMac int @@ -584,6 +657,25 @@ const ( BOND_FAIL_OVER_MAC_FOLLOW ) +var bondFailOverMacToString = map[BondFailOverMac]string{ + BOND_FAIL_OVER_MAC_NONE: "none", + BOND_FAIL_OVER_MAC_ACTIVE: "active", + BOND_FAIL_OVER_MAC_FOLLOW: "follow", +} +var StringToBondFailOverMacMap = map[string]BondFailOverMac{ + "none": BOND_FAIL_OVER_MAC_NONE, + "active": BOND_FAIL_OVER_MAC_ACTIVE, + "follow": BOND_FAIL_OVER_MAC_FOLLOW, +} + +func (b BondFailOverMac) String() string { + s, ok := bondFailOverMacToString[b] + if !ok { + return fmt.Sprintf("BondFailOverMac(%d)", b) + } + return s +} + // BondXmitHashPolicy type type BondXmitHashPolicy int @@ -675,6 +767,25 @@ const ( BOND_AD_SELECT_COUNT ) +var bondAdSelectToString = map[BondAdSelect]string{ + BOND_AD_SELECT_STABLE: "stable", + BOND_AD_SELECT_BANDWIDTH: "bandwidth", + BOND_AD_SELECT_COUNT: "count", +} +var StringToBondAdSelectMap = map[string]BondAdSelect{ + "stable": BOND_AD_SELECT_STABLE, + "bandwidth": BOND_AD_SELECT_BANDWIDTH, + "count": BOND_AD_SELECT_COUNT, +} + +func (b BondAdSelect) String() string { + s, ok := bondAdSelectToString[b] + if !ok { + return fmt.Sprintf("BondAdSelect(%d)", b) + } + return s +} + // BondAdInfo represents ad info for bond type BondAdInfo struct { AggregatorId int @@ -706,7 +817,7 @@ type Bond struct { AllSlavesActive int MinLinks int LpInterval int - PackersPerSlave int + PacketsPerSlave int LacpRate BondLacpRate AdSelect BondAdSelect // looking at iproute tool AdInfo can only be retrived. It can't be set. 
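Above, the bond enums gain String() methods plus exported string-to-value maps, and the misspelled PackersPerSlave field becomes PacketsPerSlave. A small sketch of building a bond from textual configuration with the renamed field and the new maps (interface name hypothetical):

package main

import (
	"log"

	"github.com/vishvananda/netlink"
)

func main() {
	bond := netlink.NewLinkBond(netlink.LinkAttrs{Name: "bond0"}) // hypothetical name
	bond.Mode = netlink.BOND_MODE_ACTIVE_BACKUP
	bond.PacketsPerSlave = 1 // was PackersPerSlave

	// Textual knobs can now be translated through the exported maps.
	if v, ok := netlink.StringToBondPrimaryReselectMap["better"]; ok {
		bond.PrimaryReselect = v
	}
	log.Printf("primary_reselect=%s ad_select=%s", bond.PrimaryReselect, bond.AdSelect)

	if err := netlink.LinkAdd(bond); err != nil {
		log.Fatalf("bond add: %v", err)
	}
}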
@@ -739,7 +850,7 @@ func NewLinkBond(atr LinkAttrs) *Bond { AllSlavesActive: -1, MinLinks: -1, LpInterval: -1, - PackersPerSlave: -1, + PacketsPerSlave: -1, LacpRate: -1, AdSelect: -1, AdActorSysPrio: -1, @@ -789,8 +900,10 @@ func (bond *Bond) Type() string { type BondSlaveState uint8 const ( - BondStateActive = iota // Link is active. - BondStateBackup // Link is backup. + //BondStateActive Link is active. + BondStateActive BondSlaveState = iota + //BondStateBackup Link is backup. + BondStateBackup ) func (s BondSlaveState) String() string { @@ -804,15 +917,19 @@ func (s BondSlaveState) String() string { } } -// BondSlaveState represents the values of the IFLA_BOND_SLAVE_MII_STATUS bond slave +// BondSlaveMiiStatus represents the values of the IFLA_BOND_SLAVE_MII_STATUS bond slave // attribute, which contains the status of MII link monitoring type BondSlaveMiiStatus uint8 const ( - BondLinkUp = iota // link is up and running. - BondLinkFail // link has just gone down. - BondLinkDown // link has been down for too long time. - BondLinkBack // link is going back. + //BondLinkUp link is up and running. + BondLinkUp BondSlaveMiiStatus = iota + //BondLinkFail link has just gone down. + BondLinkFail + //BondLinkDown link has been down for too long time. + BondLinkDown + //BondLinkBack link is going back. + BondLinkBack ) func (s BondSlaveMiiStatus) String() string { @@ -845,6 +962,38 @@ func (b *BondSlave) SlaveType() string { return "bond" } +type VrfSlave struct { + Table uint32 +} + +func (v *VrfSlave) SlaveType() string { + return "vrf" +} + +// Geneve devices must specify RemoteIP and ID (VNI) on create +// https://github.com/torvalds/linux/blob/47ec5303d73ea344e84f46660fff693c57641386/drivers/net/geneve.c#L1209-L1223 +type Geneve struct { + LinkAttrs + ID uint32 // vni + Remote net.IP + Ttl uint8 + Tos uint8 + Dport uint16 + UdpCsum uint8 + UdpZeroCsum6Tx uint8 + UdpZeroCsum6Rx uint8 + Link uint32 + FlowBased bool +} + +func (geneve *Geneve) Attrs() *LinkAttrs { + return &geneve.LinkAttrs +} + +func (geneve *Geneve) Type() string { + return "geneve" +} + // Gretap devices must specify LocalIP and RemoteIP on create type Gretap struct { LinkAttrs @@ -889,6 +1038,7 @@ type Iptun struct { EncapType uint16 EncapFlags uint16 FlowBased bool + Proto uint8 } func (iptun *Iptun) Attrs() *LinkAttrs { @@ -924,6 +1074,37 @@ func (ip6tnl *Ip6tnl) Type() string { return "ip6tnl" } +// from https://elixir.bootlin.com/linux/v5.15.4/source/include/uapi/linux/if_tunnel.h#L84 +type TunnelEncapType uint16 + +const ( + None TunnelEncapType = iota + FOU + GUE +) + +// from https://elixir.bootlin.com/linux/v5.15.4/source/include/uapi/linux/if_tunnel.h#L91 +type TunnelEncapFlag uint16 + +const ( + CSum TunnelEncapFlag = 1 << 0 + CSum6 = 1 << 1 + RemCSum = 1 << 2 +) + +// from https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/ip6_tunnel.h#L12 +type IP6TunnelFlag uint16 + +const ( + IP6_TNL_F_IGN_ENCAP_LIMIT IP6TunnelFlag = 1 // don't add encapsulation limit if one isn't present in inner packet + IP6_TNL_F_USE_ORIG_TCLASS = 2 // copy the traffic class field from the inner packet + IP6_TNL_F_USE_ORIG_FLOWLABEL = 4 // copy the flowlabel from the inner packet + IP6_TNL_F_MIP6_DEV = 8 // being used for Mobile IPv6 + IP6_TNL_F_RCV_DSCP_COPY = 10 // copy DSCP from the outer packet + IP6_TNL_F_USE_ORIG_FWMARK = 20 // copy fwmark from inner packet + IP6_TNL_F_ALLOW_LOCAL_REMOTE = 40 // allow remote endpoint on the local node +) + type Sittun struct { LinkAttrs Link uint32 @@ -1068,6 +1249,58 @@ var 
StringToIPoIBMode = map[string]IPoIBMode{ "connected": IPOIB_MODE_CONNECTED, } +const ( + CAN_STATE_ERROR_ACTIVE = iota + CAN_STATE_ERROR_WARNING + CAN_STATE_ERROR_PASSIVE + CAN_STATE_BUS_OFF + CAN_STATE_STOPPED + CAN_STATE_SLEEPING +) + +type Can struct { + LinkAttrs + + BitRate uint32 + SamplePoint uint32 + TimeQuanta uint32 + PropagationSegment uint32 + PhaseSegment1 uint32 + PhaseSegment2 uint32 + SyncJumpWidth uint32 + BitRatePreScaler uint32 + + Name string + TimeSegment1Min uint32 + TimeSegment1Max uint32 + TimeSegment2Min uint32 + TimeSegment2Max uint32 + SyncJumpWidthMax uint32 + BitRatePreScalerMin uint32 + BitRatePreScalerMax uint32 + BitRatePreScalerInc uint32 + + ClockFrequency uint32 + + State uint32 + + Mask uint32 + Flags uint32 + + TxError uint16 + RxError uint16 + + RestartMs uint32 +} + +func (can *Can) Attrs() *LinkAttrs { + return &can.LinkAttrs +} + +func (can *Can) Type() string { + return "can" +} + type IPoIB struct { LinkAttrs Pkey uint16 @@ -1083,11 +1316,27 @@ func (ipoib *IPoIB) Type() string { return "ipoib" } +type BareUDP struct { + LinkAttrs + Port uint16 + EtherType uint16 + SrcPortMin uint16 + MultiProto bool +} + +func (bareudp *BareUDP) Attrs() *LinkAttrs { + return &bareudp.LinkAttrs +} + +func (bareudp *BareUDP) Type() string { + return "bareudp" +} + // iproute2 supported devices; // vlan | veth | vcan | dummy | ifb | macvlan | macvtap | // bridge | bond | ipoib | ip6tnl | ipip | sit | vxlan | // gre | gretap | ip6gre | ip6gretap | vti | vti6 | nlmon | -// bond_slave | ipvlan | xfrm +// bond_slave | ipvlan | xfrm | bareudp // LinkNotFoundError wraps the various not found errors when // getting/reading links. This is intended for better error diff --git a/agent/vendor/github.com/vishvananda/netlink/link_linux.go b/agent/vendor/github.com/vishvananda/netlink/link_linux.go index c02fa63b812..276947a0069 100644 --- a/agent/vendor/github.com/vishvananda/netlink/link_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/link_linux.go @@ -34,14 +34,27 @@ const ( TUNTAP_MULTI_QUEUE_DEFAULTS TuntapFlag = TUNTAP_MULTI_QUEUE | TUNTAP_NO_PI ) +var StringToTuntapModeMap = map[string]TuntapMode{ + "tun": TUNTAP_MODE_TUN, + "tap": TUNTAP_MODE_TAP, +} + +func (ttm TuntapMode) String() string { + switch ttm { + case TUNTAP_MODE_TUN: + return "tun" + case TUNTAP_MODE_TAP: + return "tap" + } + return "unknown" +} + const ( VF_LINK_STATE_AUTO uint32 = 0 VF_LINK_STATE_ENABLE uint32 = 1 VF_LINK_STATE_DISABLE uint32 = 2 ) -var lookupByDump = false - var macvlanModes = [...]uint32{ 0, nl.MACVLAN_MODE_PRIVATE, @@ -138,7 +151,6 @@ func (h *Handle) LinkSetAllmulticastOn(link Link) error { msg := nl.NewIfInfomsg(unix.AF_UNSPEC) msg.Change = unix.IFF_ALLMULTI msg.Flags = unix.IFF_ALLMULTI - msg.Index = int32(base.Index) req.AddData(msg) @@ -168,6 +180,51 @@ func (h *Handle) LinkSetAllmulticastOff(link Link) error { return err } +// LinkSetMulticastOn enables the reception of multicast packets for the link device. +// Equivalent to: `ip link set $link multicast on` +func LinkSetMulticastOn(link Link) error { + return pkgHandle.LinkSetMulticastOn(link) +} + +// LinkSetMulticastOn enables the reception of multicast packets for the link device. 
+// Equivalent to: `ip link set $link multicast on` +func (h *Handle) LinkSetMulticastOn(link Link) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change = unix.IFF_MULTICAST + msg.Flags = unix.IFF_MULTICAST + msg.Index = int32(base.Index) + req.AddData(msg) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetAllmulticastOff disables the reception of multicast packets for the link device. +// Equivalent to: `ip link set $link multicast off` +func LinkSetMulticastOff(link Link) error { + return pkgHandle.LinkSetMulticastOff(link) +} + +// LinkSetAllmulticastOff disables the reception of multicast packets for the link device. +// Equivalent to: `ip link set $link multicast off` +func (h *Handle) LinkSetMulticastOff(link Link) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change = unix.IFF_MULTICAST + msg.Index = int32(base.Index) + req.AddData(msg) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + func MacvlanMACAddrAdd(link Link, addr net.HardwareAddr) error { return pkgHandle.MacvlanMACAddrAdd(link, addr) } @@ -532,13 +589,13 @@ func (h *Handle) LinkSetVfVlanQos(link Link, vf, vlan, qos int) error { req.AddData(msg) data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) - info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil) + info := data.AddRtAttr(nl.IFLA_VF_INFO, nil) vfmsg := nl.VfVlan{ Vf: uint32(vf), Vlan: uint32(vlan), Qos: uint32(qos), } - nl.NewRtAttrChild(info, nl.IFLA_VF_VLAN, vfmsg.Serialize()) + info.AddRtAttr(nl.IFLA_VF_VLAN, vfmsg.Serialize()) req.AddData(data) _, err := req.Execute(unix.NETLINK_ROUTE, 0) @@ -1046,8 +1103,8 @@ func addBondAttrs(bond *Bond, linkInfo *nl.RtAttr) { if bond.LpInterval >= 0 { data.AddRtAttr(nl.IFLA_BOND_LP_INTERVAL, nl.Uint32Attr(uint32(bond.LpInterval))) } - if bond.PackersPerSlave >= 0 { - data.AddRtAttr(nl.IFLA_BOND_PACKETS_PER_SLAVE, nl.Uint32Attr(uint32(bond.PackersPerSlave))) + if bond.PacketsPerSlave >= 0 { + data.AddRtAttr(nl.IFLA_BOND_PACKETS_PER_SLAVE, nl.Uint32Attr(uint32(bond.PacketsPerSlave))) } if bond.LacpRate >= 0 { data.AddRtAttr(nl.IFLA_BOND_AD_LACP_RATE, nl.Uint8Attr(uint8(bond.LacpRate))) @@ -1089,6 +1146,10 @@ func (h *Handle) LinkAdd(link Link) error { return h.linkModify(link, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) } +func LinkModify(link Link) error { + return pkgHandle.LinkModify(link) +} + func (h *Handle) LinkModify(link Link) error { return h.linkModify(link, unix.NLM_F_REQUEST|unix.NLM_F_ACK) } @@ -1203,9 +1264,26 @@ func (h *Handle) linkModify(link Link, flags int) error { } + control := func(file *os.File, f func(fd uintptr)) error { + name := file.Name() + conn, err := file.SyscallConn() + if err != nil { + return fmt.Errorf("SyscallConn() failed on %s: %v", name, err) + } + if err := conn.Control(f); err != nil { + return fmt.Errorf("Failed to get file descriptor for %s: %v", name, err) + } + return nil + } + // only persist interface if NonPersist is NOT set if !tuntap.NonPersist { - _, _, errno := unix.Syscall(unix.SYS_IOCTL, fds[0].Fd(), uintptr(unix.TUNSETPERSIST), 1) + var errno syscall.Errno + if err := control(fds[0], func(fd uintptr) { + _, _, errno = unix.Syscall(unix.SYS_IOCTL, fd, uintptr(unix.TUNSETPERSIST), 1) + }); err != nil { + return err + } if errno != 0 { cleanupFds(fds) return fmt.Errorf("Tuntap IOCTL TUNSETPERSIST 
failed, errno %v", errno) @@ -1222,7 +1300,10 @@ func (h *Handle) linkModify(link Link, flags int) error { // un-persist (e.g. allow the interface to be removed) the tuntap // should not hurt if not set prior, condition might be not needed if !tuntap.NonPersist { - _, _, _ = unix.Syscall(unix.SYS_IOCTL, fds[0].Fd(), uintptr(unix.TUNSETPERSIST), 0) + // ignore error + _ = control(fds[0], func(fd uintptr) { + _, _, _ = unix.Syscall(unix.SYS_IOCTL, fd, uintptr(unix.TUNSETPERSIST), 0) + }) } cleanupFds(fds) return err @@ -1394,6 +1475,10 @@ func (h *Handle) linkModify(link Link, flags int) error { data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) data.AddRtAttr(nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(link.Mode))) data.AddRtAttr(nl.IFLA_IPVLAN_FLAG, nl.Uint16Attr(uint16(link.Flag))) + case *IPVtap: + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + data.AddRtAttr(nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(link.Mode))) + data.AddRtAttr(nl.IFLA_IPVLAN_FLAG, nl.Uint16Attr(uint16(link.Flag))) case *Macvlan: if link.Mode != MACVLAN_MODE_DEFAULT { data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) @@ -1404,6 +1489,8 @@ func (h *Handle) linkModify(link Link, flags int) error { data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) data.AddRtAttr(nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[link.Mode])) } + case *Geneve: + addGeneveAttrs(link, linkInfo) case *Gretap: addGretapAttrs(link, linkInfo) case *Iptun: @@ -1426,6 +1513,8 @@ func (h *Handle) linkModify(link Link, flags int) error { addXfrmiAttrs(link, linkInfo) case *IPoIB: addIPoIBAttrs(link, linkInfo) + case *BareUDP: + addBareUDPAttrs(link, linkInfo) } req.AddData(linkInfo) @@ -1607,7 +1696,7 @@ func execGetLink(req *nl.NetlinkRequest) (Link, error) { } } -// linkDeserialize deserializes a raw message received from netlink into +// LinkDeserialize deserializes a raw message received from netlink into // a link object. 
func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { msg := nl.DeserializeIfInfomsg(m) @@ -1622,9 +1711,17 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { base.RawFlags = msg.Flags base.Flags = linkFlags(msg.Flags) base.EncapType = msg.EncapType() + base.NetNsID = -1 if msg.Flags&unix.IFF_PROMISC != 0 { base.Promisc = 1 } + if msg.Flags&unix.IFF_ALLMULTI != 0 { + base.Allmulti = 1 + } + if msg.Flags&unix.IFF_MULTICAST != 0 { + base.Multi = 1 + } + var ( link Link stats32 *LinkStatistics32 @@ -1663,10 +1760,14 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { link = &Bond{} case "ipvlan": link = &IPVlan{} + case "ipvtap": + link = &IPVtap{} case "macvlan": link = &Macvlan{} case "macvtap": link = &Macvtap{} + case "geneve": + link = &Geneve{} case "gretap": link = &Gretap{} case "ip6gretap": @@ -1693,6 +1794,10 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { link = &Tuntap{} case "ipoib": link = &IPoIB{} + case "can": + link = &Can{} + case "bareudp": + link = &BareUDP{} default: link = &GenericLink{LinkType: linkType} } @@ -1710,10 +1815,14 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { parseBondData(link, data) case "ipvlan": parseIPVlanData(link, data) + case "ipvtap": + parseIPVtapData(link, data) case "macvlan": parseMacvlanData(link, data) case "macvtap": parseMacvtapData(link, data) + case "geneve": + parseGeneveData(link, data) case "gretap": parseGretapData(link, data) case "ip6gretap": @@ -1742,13 +1851,21 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { parseTuntapData(link, data) case "ipoib": parseIPoIBData(link, data) + case "can": + parseCanData(link, data) + case "bareudp": + parseBareUDPData(link, data) } + case nl.IFLA_INFO_SLAVE_KIND: slaveType = string(info.Value[:len(info.Value)-1]) switch slaveType { case "bond": linkSlave = &BondSlave{} + case "vrf": + linkSlave = &VrfSlave{} } + case nl.IFLA_INFO_SLAVE_DATA: switch slaveType { case "bond": @@ -1757,6 +1874,12 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { return nil, err } parseBondSlaveData(linkSlave, data) + case "vrf": + data, err := nl.ParseRouteAttr(info.Value) + if err != nil { + return nil, err + } + parseVrfSlaveData(linkSlave, data) } } } @@ -1810,6 +1933,8 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { } case unix.IFLA_OPERSTATE: base.OperState = LinkOperState(uint8(attr.Value[0])) + case unix.IFLA_PHYS_SWITCH_ID: + base.PhysSwitchID = int(native.Uint32(attr.Value[0:4])) case unix.IFLA_LINK_NETNSID: base.NetNsID = int(native.Uint32(attr.Value[0:4])) case unix.IFLA_GSO_MAX_SIZE: @@ -1998,7 +2123,8 @@ func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-c msgs, from, err := s.Receive() if err != nil { if cberr != nil { - cberr(err) + cberr(fmt.Errorf("Receive failed: %v", + err)) } return } @@ -2013,15 +2139,15 @@ func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-c continue } if m.Header.Type == unix.NLMSG_ERROR { - native := nl.NativeEndian() error := int32(native.Uint32(m.Data[0:4])) if error == 0 { continue } if cberr != nil { - cberr(syscall.Errno(-error)) + cberr(fmt.Errorf("error message: %v", + syscall.Errno(-error))) } - return + continue } ifmsg := nl.DeserializeIfInfomsg(m.Data) header := unix.NlMsghdr(m.Header) @@ -2030,7 +2156,7 @@ func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-c if cberr != nil { cberr(err) } - return + continue } ch <- 
LinkUpdate{IfInfomsg: *ifmsg, Header: header, Link: link} } @@ -2299,7 +2425,7 @@ func parseBondData(link Link, data []syscall.NetlinkRouteAttr) { case nl.IFLA_BOND_LP_INTERVAL: bond.LpInterval = int(native.Uint32(data[i].Value[0:4])) case nl.IFLA_BOND_PACKETS_PER_SLAVE: - bond.PackersPerSlave = int(native.Uint32(data[i].Value[0:4])) + bond.PacketsPerSlave = int(native.Uint32(data[i].Value[0:4])) case nl.IFLA_BOND_AD_LACP_RATE: bond.LacpRate = BondLacpRate(data[i].Value[0]) case nl.IFLA_BOND_AD_SELECT: @@ -2379,6 +2505,16 @@ func parseBondSlaveData(slave LinkSlave, data []syscall.NetlinkRouteAttr) { } } +func parseVrfSlaveData(slave LinkSlave, data []syscall.NetlinkRouteAttr) { + vrfSlave := slave.(*VrfSlave) + for i := range data { + switch data[i].Attr.Type { + case nl.IFLA_BOND_SLAVE_STATE: + vrfSlave.Table = native.Uint32(data[i].Value[0:4]) + } + } +} + func parseIPVlanData(link Link, data []syscall.NetlinkRouteAttr) { ipv := link.(*IPVlan) for _, datum := range data { @@ -2391,6 +2527,18 @@ func parseIPVlanData(link Link, data []syscall.NetlinkRouteAttr) { } } +func parseIPVtapData(link Link, data []syscall.NetlinkRouteAttr) { + ipv := link.(*IPVtap) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_IPVLAN_MODE: + ipv.Mode = IPVlanMode(native.Uint32(datum.Value[0:4])) + case nl.IFLA_IPVLAN_FLAG: + ipv.Flag = IPVlanFlag(native.Uint32(datum.Value[0:4])) + } + } +} + func parseMacvtapData(link Link, data []syscall.NetlinkRouteAttr) { macv := link.(*Macvtap) parseMacvlanData(&macv.Macvlan, data) @@ -2448,6 +2596,58 @@ func linkFlags(rawFlags uint32) net.Flags { return f } +func addGeneveAttrs(geneve *Geneve, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + + if geneve.FlowBased { + // In flow based mode, no other attributes need to be configured + linkInfo.AddRtAttr(nl.IFLA_GENEVE_COLLECT_METADATA, boolAttr(geneve.FlowBased)) + return + } + + if ip := geneve.Remote; ip != nil { + if ip4 := ip.To4(); ip4 != nil { + data.AddRtAttr(nl.IFLA_GENEVE_REMOTE, ip.To4()) + } else { + data.AddRtAttr(nl.IFLA_GENEVE_REMOTE6, []byte(ip)) + } + } + + if geneve.ID != 0 { + data.AddRtAttr(nl.IFLA_GENEVE_ID, nl.Uint32Attr(geneve.ID)) + } + + if geneve.Dport != 0 { + data.AddRtAttr(nl.IFLA_GENEVE_PORT, htons(geneve.Dport)) + } + + if geneve.Ttl != 0 { + data.AddRtAttr(nl.IFLA_GENEVE_TTL, nl.Uint8Attr(geneve.Ttl)) + } + + if geneve.Tos != 0 { + data.AddRtAttr(nl.IFLA_GENEVE_TOS, nl.Uint8Attr(geneve.Tos)) + } +} + +func parseGeneveData(link Link, data []syscall.NetlinkRouteAttr) { + geneve := link.(*Geneve) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_GENEVE_ID: + geneve.ID = native.Uint32(datum.Value[0:4]) + case nl.IFLA_GENEVE_REMOTE, nl.IFLA_GENEVE_REMOTE6: + geneve.Remote = datum.Value + case nl.IFLA_GENEVE_PORT: + geneve.Dport = ntohs(datum.Value[0:2]) + case nl.IFLA_GENEVE_TTL: + geneve.Ttl = uint8(datum.Value[0]) + case nl.IFLA_GENEVE_TOS: + geneve.Tos = uint8(datum.Value[0]) + } + } +} + func addGretapAttrs(gretap *Gretap, linkInfo *nl.RtAttr) { data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) @@ -2674,11 +2874,16 @@ func addIptunAttrs(iptun *Iptun, linkInfo *nl.RtAttr) { data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(iptun.EncapFlags)) data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_SPORT, htons(iptun.EncapSport)) data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_DPORT, htons(iptun.EncapDport)) + data.AddRtAttr(nl.IFLA_IPTUN_PROTO, nl.Uint8Attr(iptun.Proto)) } func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) { iptun := 
link.(*Iptun) for _, datum := range data { + // NOTE: same with vxlan, ip tunnel may also has null datum.Value + if len(datum.Value) == 0 { + continue + } switch datum.Attr.Type { case nl.IFLA_IPTUN_LOCAL: iptun.Local = net.IP(datum.Value[0:4]) @@ -2700,6 +2905,8 @@ func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) { iptun.EncapFlags = native.Uint16(datum.Value[0:2]) case nl.IFLA_IPTUN_COLLECT_METADATA: iptun.FlowBased = true + case nl.IFLA_IPTUN_PROTO: + iptun.Proto = datum.Value[0] } } } @@ -3022,8 +3229,9 @@ func parseVfInfo(data []syscall.NetlinkRouteAttr, id int) VfInfo { func addXfrmiAttrs(xfrmi *Xfrmi, linkInfo *nl.RtAttr) { data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) data.AddRtAttr(nl.IFLA_XFRM_LINK, nl.Uint32Attr(uint32(xfrmi.ParentIndex))) - data.AddRtAttr(nl.IFLA_XFRM_IF_ID, nl.Uint32Attr(xfrmi.Ifid)) - + if xfrmi.Ifid != 0 { + data.AddRtAttr(nl.IFLA_XFRM_IF_ID, nl.Uint32Attr(xfrmi.Ifid)) + } } func parseXfrmiData(link Link, data []syscall.NetlinkRouteAttr) { @@ -3172,9 +3380,86 @@ func parseIPoIBData(link Link, data []syscall.NetlinkRouteAttr) { } } +func parseCanData(link Link, data []syscall.NetlinkRouteAttr) { + can := link.(*Can) + for _, datum := range data { + + switch datum.Attr.Type { + case nl.IFLA_CAN_BITTIMING: + can.BitRate = native.Uint32(datum.Value) + can.SamplePoint = native.Uint32(datum.Value[4:]) + can.TimeQuanta = native.Uint32(datum.Value[8:]) + can.PropagationSegment = native.Uint32(datum.Value[12:]) + can.PhaseSegment1 = native.Uint32(datum.Value[16:]) + can.PhaseSegment2 = native.Uint32(datum.Value[20:]) + can.SyncJumpWidth = native.Uint32(datum.Value[24:]) + can.BitRatePreScaler = native.Uint32(datum.Value[28:]) + case nl.IFLA_CAN_BITTIMING_CONST: + can.Name = string(datum.Value[:16]) + can.TimeSegment1Min = native.Uint32(datum.Value[16:]) + can.TimeSegment1Max = native.Uint32(datum.Value[20:]) + can.TimeSegment2Min = native.Uint32(datum.Value[24:]) + can.TimeSegment2Max = native.Uint32(datum.Value[28:]) + can.SyncJumpWidthMax = native.Uint32(datum.Value[32:]) + can.BitRatePreScalerMin = native.Uint32(datum.Value[36:]) + can.BitRatePreScalerMax = native.Uint32(datum.Value[40:]) + can.BitRatePreScalerInc = native.Uint32(datum.Value[44:]) + case nl.IFLA_CAN_CLOCK: + can.ClockFrequency = native.Uint32(datum.Value) + case nl.IFLA_CAN_STATE: + can.State = native.Uint32(datum.Value) + case nl.IFLA_CAN_CTRLMODE: + can.Mask = native.Uint32(datum.Value) + can.Flags = native.Uint32(datum.Value[4:]) + case nl.IFLA_CAN_BERR_COUNTER: + can.TxError = native.Uint16(datum.Value) + can.RxError = native.Uint16(datum.Value[2:]) + case nl.IFLA_CAN_RESTART_MS: + can.RestartMs = native.Uint32(datum.Value) + case nl.IFLA_CAN_DATA_BITTIMING_CONST: + case nl.IFLA_CAN_RESTART: + case nl.IFLA_CAN_DATA_BITTIMING: + case nl.IFLA_CAN_TERMINATION: + case nl.IFLA_CAN_TERMINATION_CONST: + case nl.IFLA_CAN_BITRATE_CONST: + case nl.IFLA_CAN_DATA_BITRATE_CONST: + case nl.IFLA_CAN_BITRATE_MAX: + } + } +} + func addIPoIBAttrs(ipoib *IPoIB, linkInfo *nl.RtAttr) { data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) data.AddRtAttr(nl.IFLA_IPOIB_PKEY, nl.Uint16Attr(uint16(ipoib.Pkey))) data.AddRtAttr(nl.IFLA_IPOIB_MODE, nl.Uint16Attr(uint16(ipoib.Mode))) data.AddRtAttr(nl.IFLA_IPOIB_UMCAST, nl.Uint16Attr(uint16(ipoib.Umcast))) } + +func addBareUDPAttrs(bareudp *BareUDP, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + + data.AddRtAttr(nl.IFLA_BAREUDP_PORT, nl.Uint16Attr(nl.Swap16(bareudp.Port))) + data.AddRtAttr(nl.IFLA_BAREUDP_ETHERTYPE, 
nl.Uint16Attr(nl.Swap16(bareudp.EtherType))) + if bareudp.SrcPortMin != 0 { + data.AddRtAttr(nl.IFLA_BAREUDP_SRCPORT_MIN, nl.Uint16Attr(bareudp.SrcPortMin)) + } + if bareudp.MultiProto { + data.AddRtAttr(nl.IFLA_BAREUDP_MULTIPROTO_MODE, []byte{}) + } +} + +func parseBareUDPData(link Link, data []syscall.NetlinkRouteAttr) { + bareudp := link.(*BareUDP) + for _, attr := range data { + switch attr.Attr.Type { + case nl.IFLA_BAREUDP_PORT: + bareudp.Port = binary.BigEndian.Uint16(attr.Value) + case nl.IFLA_BAREUDP_ETHERTYPE: + bareudp.EtherType = binary.BigEndian.Uint16(attr.Value) + case nl.IFLA_BAREUDP_SRCPORT_MIN: + bareudp.SrcPortMin = native.Uint16(attr.Value) + case nl.IFLA_BAREUDP_MULTIPROTO_MODE: + bareudp.MultiProto = true + } + } +} diff --git a/agent/vendor/github.com/vishvananda/netlink/neigh.go b/agent/vendor/github.com/vishvananda/netlink/neigh.go index 379e5655f73..32d722e8858 100644 --- a/agent/vendor/github.com/vishvananda/netlink/neigh.go +++ b/agent/vendor/github.com/vishvananda/netlink/neigh.go @@ -12,6 +12,7 @@ type Neigh struct { State int Type int Flags int + FlagsExt int IP net.IP HardwareAddr net.HardwareAddr LLIPAddr net.IP //Used in the case of NHRP diff --git a/agent/vendor/github.com/vishvananda/netlink/neigh_linux.go b/agent/vendor/github.com/vishvananda/netlink/neigh_linux.go index fb220d141cc..4c1e766351c 100644 --- a/agent/vendor/github.com/vishvananda/netlink/neigh_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/neigh_linux.go @@ -24,7 +24,11 @@ const ( NDA_MASTER NDA_LINK_NETNSID NDA_SRC_VNI - NDA_MAX = NDA_SRC_VNI + NDA_PROTOCOL + NDA_NH_ID + NDA_FDB_EXT_ATTRS + NDA_FLAGS_EXT + NDA_MAX = NDA_FLAGS_EXT ) // Neighbor Cache Entry States. @@ -42,11 +46,19 @@ const ( // Neighbor Flags const ( - NTF_USE = 0x01 - NTF_SELF = 0x02 - NTF_MASTER = 0x04 - NTF_PROXY = 0x08 - NTF_ROUTER = 0x80 + NTF_USE = 0x01 + NTF_SELF = 0x02 + NTF_MASTER = 0x04 + NTF_PROXY = 0x08 + NTF_EXT_LEARNED = 0x10 + NTF_OFFLOADED = 0x20 + NTF_STICKY = 0x40 + NTF_ROUTER = 0x80 +) + +// Extended Neighbor Flags +const ( + NTF_EXT_MANAGED = 0x00000001 ) // Ndmsg is for adding, removing or receiving information about a neighbor table entry @@ -162,11 +174,16 @@ func neighHandle(neigh *Neigh, req *nl.NetlinkRequest) error { if neigh.LLIPAddr != nil { llIPData := nl.NewRtAttr(NDA_LLADDR, neigh.LLIPAddr.To4()) req.AddData(llIPData) - } else if neigh.Flags != NTF_PROXY || neigh.HardwareAddr != nil { + } else if neigh.HardwareAddr != nil { hwData := nl.NewRtAttr(NDA_LLADDR, []byte(neigh.HardwareAddr)) req.AddData(hwData) } + if neigh.FlagsExt != 0 { + flagsExtData := nl.NewRtAttr(NDA_FLAGS_EXT, nl.Uint32Attr(uint32(neigh.FlagsExt))) + req.AddData(flagsExtData) + } + if neigh.Vlan != 0 { vlanData := nl.NewRtAttr(NDA_VLAN, nl.Uint16Attr(uint16(neigh.Vlan))) req.AddData(vlanData) @@ -305,6 +322,8 @@ func NeighDeserialize(m []byte) (*Neigh, error) { } else { neigh.HardwareAddr = net.HardwareAddr(attr.Value) } + case NDA_FLAGS_EXT: + neigh.FlagsExt = int(native.Uint32(attr.Value[0:4])) case NDA_VLAN: neigh.Vlan = int(native.Uint16(attr.Value[0:2])) case NDA_VNI: @@ -408,7 +427,6 @@ func neighSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- NeighUpdate, done < continue } if m.Header.Type == unix.NLMSG_ERROR { - native := nl.NativeEndian() error := int32(native.Uint32(m.Data[0:4])) if error == 0 { continue diff --git a/agent/vendor/github.com/vishvananda/netlink/netlink_unspecified.go b/agent/vendor/github.com/vishvananda/netlink/netlink_unspecified.go index 71436f25ccd..98d2c0dbf37 100644 --- 
a/agent/vendor/github.com/vishvananda/netlink/netlink_unspecified.go +++ b/agent/vendor/github.com/vishvananda/netlink/netlink_unspecified.go @@ -180,14 +180,30 @@ func RouteAdd(route *Route) error { return ErrNotImplemented } +func RouteAppend(route *Route) error { + return ErrNotImplemented +} + func RouteDel(route *Route) error { return ErrNotImplemented } +func RouteGet(destination net.IP) ([]Route, error) { + return nil, ErrNotImplemented +} + func RouteList(link Link, family int) ([]Route, error) { return nil, ErrNotImplemented } +func RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) { + return nil, ErrNotImplemented +} + +func RouteReplace(route *Route) error { + return ErrNotImplemented +} + func XfrmPolicyAdd(policy *XfrmPolicy) error { return ErrNotImplemented } diff --git a/agent/vendor/github.com/vishvananda/netlink/netns_linux.go b/agent/vendor/github.com/vishvananda/netlink/netns_linux.go index 77cf6f46901..2eb29c7ce01 100644 --- a/agent/vendor/github.com/vishvananda/netlink/netns_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/netns_linux.go @@ -87,7 +87,7 @@ func (h *Handle) getNetNsId(attrType int, val uint32) (int, error) { rtgen := nl.NewRtGenMsg() req.AddData(rtgen) - b := make([]byte, 4, 4) + b := make([]byte, 4) native.PutUint32(b, val) attr := nl.NewRtAttr(attrType, b) req.AddData(attr) @@ -126,12 +126,12 @@ func (h *Handle) setNetNsId(attrType int, val uint32, newnsid uint32) error { rtgen := nl.NewRtGenMsg() req.AddData(rtgen) - b := make([]byte, 4, 4) + b := make([]byte, 4) native.PutUint32(b, val) attr := nl.NewRtAttr(attrType, b) req.AddData(attr) - b1 := make([]byte, 4, 4) + b1 := make([]byte, 4) native.PutUint32(b1, newnsid) attr1 := nl.NewRtAttr(NETNSA_NSID, b1) req.AddData(attr1) diff --git a/agent/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go b/agent/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go index 14924027e54..183601803b8 100644 --- a/agent/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go @@ -44,6 +44,7 @@ const ( NLA_F_NESTED uint16 = (1 << 15) // #define NLA_F_NESTED (1 << 15) NLA_F_NET_BYTEORDER uint16 = (1 << 14) // #define NLA_F_NESTED (1 << 14) NLA_TYPE_MASK = ^(NLA_F_NESTED | NLA_F_NET_BYTEORDER) + NLA_ALIGNTO uint16 = 4 // #define NLA_ALIGNTO 4 ) // enum ctattr_type { diff --git a/agent/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go b/agent/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go index db66faaad30..2995da492f6 100644 --- a/agent/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go @@ -10,16 +10,38 @@ const ( const ( DEVLINK_CMD_GET = 1 + DEVLINK_CMD_PORT_GET = 5 + DEVLINK_CMD_PORT_SET = 6 + DEVLINK_CMD_PORT_NEW = 7 + DEVLINK_CMD_PORT_DEL = 8 DEVLINK_CMD_ESWITCH_GET = 29 DEVLINK_CMD_ESWITCH_SET = 30 + DEVLINK_CMD_INFO_GET = 51 ) const ( - DEVLINK_ATTR_BUS_NAME = 1 - DEVLINK_ATTR_DEV_NAME = 2 - DEVLINK_ATTR_ESWITCH_MODE = 25 - DEVLINK_ATTR_ESWITCH_INLINE_MODE = 26 - DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 62 + DEVLINK_ATTR_BUS_NAME = 1 + DEVLINK_ATTR_DEV_NAME = 2 + DEVLINK_ATTR_PORT_INDEX = 3 + DEVLINK_ATTR_PORT_TYPE = 4 + DEVLINK_ATTR_PORT_NETDEV_IFINDEX = 6 + DEVLINK_ATTR_PORT_NETDEV_NAME = 7 + DEVLINK_ATTR_PORT_IBDEV_NAME = 8 + DEVLINK_ATTR_ESWITCH_MODE = 25 + DEVLINK_ATTR_ESWITCH_INLINE_MODE = 26 + DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 62 + DEVLINK_ATTR_PORT_FLAVOUR = 77 + 
DEVLINK_ATTR_INFO_DRIVER_NAME = 98 + DEVLINK_ATTR_INFO_SERIAL_NUMBER = 99 + DEVLINK_ATTR_INFO_VERSION_FIXED = 100 + DEVLINK_ATTR_INFO_VERSION_RUNNING = 101 + DEVLINK_ATTR_INFO_VERSION_STORED = 102 + DEVLINK_ATTR_INFO_VERSION_NAME = 103 + DEVLINK_ATTR_INFO_VERSION_VALUE = 104 + DEVLINK_ATTR_PORT_PCI_PF_NUMBER = 127 + DEVLINK_ATTR_PORT_FUNCTION = 145 + DEVLINK_ATTR_PORT_CONTROLLER_NUMBER = 150 + DEVLINK_ATTR_PORT_PCI_SF_NUMBER = 164 ) const ( @@ -38,3 +60,37 @@ const ( DEVLINK_ESWITCH_ENCAP_MODE_NONE = 0 DEVLINK_ESWITCH_ENCAP_MODE_BASIC = 1 ) + +const ( + DEVLINK_PORT_FLAVOUR_PHYSICAL = 0 + DEVLINK_PORT_FLAVOUR_CPU = 1 + DEVLINK_PORT_FLAVOUR_DSA = 2 + DEVLINK_PORT_FLAVOUR_PCI_PF = 3 + DEVLINK_PORT_FLAVOUR_PCI_VF = 4 + DEVLINK_PORT_FLAVOUR_VIRTUAL = 5 + DEVLINK_PORT_FLAVOUR_UNUSED = 6 + DEVLINK_PORT_FLAVOUR_PCI_SF = 7 +) + +const ( + DEVLINK_PORT_TYPE_NOTSET = 0 + DEVLINK_PORT_TYPE_AUTO = 1 + DEVLINK_PORT_TYPE_ETH = 2 + DEVLINK_PORT_TYPE_IB = 3 +) + +const ( + DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR = 1 + DEVLINK_PORT_FN_ATTR_STATE = 2 + DEVLINK_PORT_FN_ATTR_OPSTATE = 3 +) + +const ( + DEVLINK_PORT_FN_STATE_INACTIVE = 0 + DEVLINK_PORT_FN_STATE_ACTIVE = 1 +) + +const ( + DEVLINK_PORT_FN_OPSTATE_DETACHED = 0 + DEVLINK_PORT_FN_OPSTATE_ATTACHED = 1 +) diff --git a/agent/vendor/github.com/vishvananda/netlink/nl/link_linux.go b/agent/vendor/github.com/vishvananda/netlink/nl/link_linux.go index faee2fa03ea..e10edbc09d8 100644 --- a/agent/vendor/github.com/vishvananda/netlink/nl/link_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/nl/link_linux.go @@ -173,6 +173,22 @@ const ( IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE ) +const ( + IFLA_GENEVE_UNSPEC = iota + IFLA_GENEVE_ID // vni + IFLA_GENEVE_REMOTE + IFLA_GENEVE_TTL + IFLA_GENEVE_TOS + IFLA_GENEVE_PORT // destination port + IFLA_GENEVE_COLLECT_METADATA + IFLA_GENEVE_REMOTE6 + IFLA_GENEVE_UDP_CSUM + IFLA_GENEVE_UDP_ZERO_CSUM6_TX + IFLA_GENEVE_UDP_ZERO_CSUM6_RX + IFLA_GENEVE_LABEL + IFLA_GENEVE_MAX = IFLA_GENEVE_LABEL +) + const ( IFLA_GRE_UNSPEC = iota IFLA_GRE_LINK @@ -673,3 +689,32 @@ const ( IFLA_IPOIB_UMCAST IFLA_IPOIB_MAX = IFLA_IPOIB_UMCAST ) + +const ( + IFLA_CAN_UNSPEC = iota + IFLA_CAN_BITTIMING + IFLA_CAN_BITTIMING_CONST + IFLA_CAN_CLOCK + IFLA_CAN_STATE + IFLA_CAN_CTRLMODE + IFLA_CAN_RESTART_MS + IFLA_CAN_RESTART + IFLA_CAN_BERR_COUNTER + IFLA_CAN_DATA_BITTIMING + IFLA_CAN_DATA_BITTIMING_CONST + IFLA_CAN_TERMINATION + IFLA_CAN_TERMINATION_CONST + IFLA_CAN_BITRATE_CONST + IFLA_CAN_DATA_BITRATE_CONST + IFLA_CAN_BITRATE_MAX + IFLA_CAN_MAX = IFLA_CAN_BITRATE_MAX +) + +const ( + IFLA_BAREUDP_UNSPEC = iota + IFLA_BAREUDP_PORT + IFLA_BAREUDP_ETHERTYPE + IFLA_BAREUDP_SRCPORT_MIN + IFLA_BAREUDP_MULTIPROTO_MODE + IFLA_BAREUDP_MAX = IFLA_BAREUDP_MULTIPROTO_MODE +) diff --git a/agent/vendor/github.com/vishvananda/netlink/nl/lwt_linux.go b/agent/vendor/github.com/vishvananda/netlink/nl/lwt_linux.go new file mode 100644 index 00000000000..bafd593c4fe --- /dev/null +++ b/agent/vendor/github.com/vishvananda/netlink/nl/lwt_linux.go @@ -0,0 +1,29 @@ +package nl + +const ( + LWT_BPF_PROG_UNSPEC = iota + LWT_BPF_PROG_FD + LWT_BPF_PROG_NAME + __LWT_BPF_PROG_MAX +) + +const ( + LWT_BPF_PROG_MAX = __LWT_BPF_PROG_MAX - 1 +) + +const ( + LWT_BPF_UNSPEC = iota + LWT_BPF_IN + LWT_BPF_OUT + LWT_BPF_XMIT + LWT_BPF_XMIT_HEADROOM + __LWT_BPF_MAX +) + +const ( + LWT_BPF_MAX = __LWT_BPF_MAX - 1 +) + +const ( + LWT_BPF_MAX_HEADROOM = 256 +) diff --git a/agent/vendor/github.com/vishvananda/netlink/nl/nl_linux.go 
b/agent/vendor/github.com/vishvananda/netlink/nl/nl_linux.go index cef64b82ecf..600b942b178 100644 --- a/agent/vendor/github.com/vishvananda/netlink/nl/nl_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/nl/nl_linux.go @@ -27,7 +27,8 @@ const ( // tc rules or filters, or other more memory requiring data. RECEIVE_BUFFER_SIZE = 65536 // Kernel netlink pid - PidKernel uint32 = 0 + PidKernel uint32 = 0 + SizeofCnMsgOp = 0x18 ) // SupportedNlFamilies contains the list of netlink families this netlink package supports @@ -35,6 +36,12 @@ var SupportedNlFamilies = []int{unix.NETLINK_ROUTE, unix.NETLINK_XFRM, unix.NETL var nextSeqNr uint32 +// Default netlink socket timeout, 60s +var SocketTimeoutTv = unix.Timeval{Sec: 60, Usec: 0} + +// ErrorMessageReporting is the default error message reporting configuration for the new netlink sockets +var EnableErrorMessageReporting bool = false + // GetIPFamily returns the family type of a net.IP. func GetIPFamily(ip net.IP) int { if len(ip) <= net.IPv4len { @@ -77,11 +84,69 @@ func Swap32(i uint32) uint32 { return (i&0xff000000)>>24 | (i&0xff0000)>>8 | (i&0xff00)<<8 | (i&0xff)<<24 } +const ( + NLMSGERR_ATTR_UNUSED = 0 + NLMSGERR_ATTR_MSG = 1 + NLMSGERR_ATTR_OFFS = 2 + NLMSGERR_ATTR_COOKIE = 3 + NLMSGERR_ATTR_POLICY = 4 +) + type NetlinkRequestData interface { Len() int Serialize() []byte } +const ( + PROC_CN_MCAST_LISTEN = 1 + PROC_CN_MCAST_IGNORE +) + +type CbID struct { + Idx uint32 + Val uint32 +} + +type CnMsg struct { + ID CbID + Seq uint32 + Ack uint32 + Length uint16 + Flags uint16 +} + +type CnMsgOp struct { + CnMsg + // here we differ from the C header + Op uint32 +} + +func NewCnMsg(idx, val, op uint32) *CnMsgOp { + var cm CnMsgOp + + cm.ID.Idx = idx + cm.ID.Val = val + + cm.Ack = 0 + cm.Seq = 1 + cm.Length = uint16(binary.Size(op)) + cm.Op = op + + return &cm +} + +func (msg *CnMsgOp) Serialize() []byte { + return (*(*[SizeofCnMsgOp]byte)(unsafe.Pointer(msg)))[:] +} + +func DeserializeCnMsgOp(b []byte) *CnMsgOp { + return (*CnMsgOp)(unsafe.Pointer(&b[0:SizeofCnMsgOp][0])) +} + +func (msg *CnMsgOp) Len() int { + return SizeofCnMsgOp +} + // IfInfomsg is related to links, but it is used for list requests as well type IfInfomsg struct { unix.IfInfomsg @@ -249,6 +314,12 @@ func (msg *IfInfomsg) EncapType() string { return fmt.Sprintf("unknown%d", msg.Type) } +// Round the length of a netlink message up to align it properly. +// Taken from syscall/netlink_linux.go by The Go Authors under BSD-style license. 
+func nlmAlignOf(msglen int) int { + return (msglen + syscall.NLMSG_ALIGNTO - 1) & ^(syscall.NLMSG_ALIGNTO - 1) +} + func rtaAlignOf(attrlen int) int { return (attrlen + unix.RTA_ALIGNTO - 1) & ^(unix.RTA_ALIGNTO - 1) } @@ -426,6 +497,19 @@ func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, erro if err != nil { return nil, err } + + if err := s.SetSendTimeout(&SocketTimeoutTv); err != nil { + return nil, err + } + if err := s.SetReceiveTimeout(&SocketTimeoutTv); err != nil { + return nil, err + } + if EnableErrorMessageReporting { + if err := s.SetExtAck(true); err != nil { + return nil, err + } + } + defer s.Close() } else { s.Lock() @@ -464,11 +548,37 @@ done: } if m.Header.Type == unix.NLMSG_DONE || m.Header.Type == unix.NLMSG_ERROR { native := NativeEndian() - error := int32(native.Uint32(m.Data[0:4])) - if error == 0 { + errno := int32(native.Uint32(m.Data[0:4])) + if errno == 0 { break done } - return nil, syscall.Errno(-error) + var err error + err = syscall.Errno(-errno) + + unreadData := m.Data[4:] + if m.Header.Flags|unix.NLM_F_ACK_TLVS != 0 && len(unreadData) > syscall.SizeofNlMsghdr { + // Skip the echoed request message. + echoReqH := (*syscall.NlMsghdr)(unsafe.Pointer(&unreadData[0])) + unreadData = unreadData[nlmAlignOf(int(echoReqH.Len)):] + + // Annotate `err` using nlmsgerr attributes. + for len(unreadData) >= syscall.SizeofRtAttr { + attr := (*syscall.RtAttr)(unsafe.Pointer(&unreadData[0])) + attrData := unreadData[syscall.SizeofRtAttr:attr.Len] + + switch attr.Type { + case NLMSGERR_ATTR_MSG: + err = fmt.Errorf("%w: %s", err, string(attrData)) + + default: + // TODO: handle other NLMSGERR_ATTR types + } + + unreadData = unreadData[rtaAlignOf(int(attr.Len)):] + } + } + + return nil, err } if resType != 0 && m.Header.Type != resType { continue @@ -683,6 +793,16 @@ func (s *NetlinkSocket) SetReceiveTimeout(timeout *unix.Timeval) error { return unix.SetsockoptTimeval(int(s.fd), unix.SOL_SOCKET, unix.SO_RCVTIMEO, timeout) } +// SetExtAck requests error messages to be reported on the socket +func (s *NetlinkSocket) SetExtAck(enable bool) error { + var enableN int + if enable { + enableN = 1 + } + + return unix.SetsockoptInt(int(s.fd), unix.SOL_NETLINK, unix.NETLINK_EXT_ACK, enableN) +} + func (s *NetlinkSocket) GetPid() (uint32, error) { fd := int(atomic.LoadInt32(&s.fd)) lsa, err := unix.Getsockname(fd) diff --git a/agent/vendor/github.com/vishvananda/netlink/nl/parse_attr.go b/agent/vendor/github.com/vishvananda/netlink/nl/parse_attr_linux.go similarity index 79% rename from agent/vendor/github.com/vishvananda/netlink/nl/parse_attr.go rename to agent/vendor/github.com/vishvananda/netlink/nl/parse_attr_linux.go index 19eb8f28eb5..7f49125cffa 100644 --- a/agent/vendor/github.com/vishvananda/netlink/nl/parse_attr.go +++ b/agent/vendor/github.com/vishvananda/netlink/nl/parse_attr_linux.go @@ -3,6 +3,7 @@ package nl import ( "encoding/binary" "fmt" + "log" ) type Attribute struct { @@ -18,9 +19,20 @@ func ParseAttributes(data []byte) <-chan Attribute { i := 0 for i+4 < len(data) { length := int(native.Uint16(data[i : i+2])) + attrType := native.Uint16(data[i+2 : i+4]) + + if length < 4 { + log.Printf("attribute 0x%02x has invalid length of %d bytes", attrType, length) + break + } + + if len(data) < i+length { + log.Printf("attribute 0x%02x of length %d is truncated, only %d bytes remaining", attrType, length, len(data)-i) + break + } result <- Attribute{ - Type: native.Uint16(data[i+2 : i+4]), + Type: attrType, Value: data[i+4 : i+length], } i += 
rtaAlignOf(length) diff --git a/agent/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go b/agent/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go index 1224b747def..ce43ee1550c 100644 --- a/agent/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go @@ -11,6 +11,8 @@ const ( const ( RDMA_NLDEV_CMD_GET = 1 RDMA_NLDEV_CMD_SET = 2 + RDMA_NLDEV_CMD_NEWLINK = 3 + RDMA_NLDEV_CMD_DELLINK = 4 RDMA_NLDEV_CMD_SYS_GET = 6 RDMA_NLDEV_CMD_SYS_SET = 7 ) @@ -30,6 +32,8 @@ const ( RDMA_NLDEV_ATTR_PORT_STATE = 12 RDMA_NLDEV_ATTR_PORT_PHYS_STATE = 13 RDMA_NLDEV_ATTR_DEV_NODE_TYPE = 14 + RDMA_NLDEV_ATTR_NDEV_NAME = 51 + RDMA_NLDEV_ATTR_LINK_TYPE = 65 RDMA_NLDEV_SYS_ATTR_NETNS_MODE = 66 RDMA_NLDEV_NET_NS_FD = 68 ) diff --git a/agent/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go b/agent/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go index 5774cbb15ae..fe88285f20e 100644 --- a/agent/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go @@ -23,7 +23,7 @@ func (s1 *IPv6SrHdr) Equal(s2 IPv6SrHdr) bool { return false } for i := range s1.Segments { - if s1.Segments[i].Equal(s2.Segments[i]) != true { + if !s1.Segments[i].Equal(s2.Segments[i]) { return false } } @@ -89,7 +89,7 @@ func DecodeSEG6Encap(buf []byte) (int, []net.IP, error) { } buf = buf[12:] if len(buf)%16 != 0 { - err := fmt.Errorf("DecodeSEG6Encap: error parsing Segment List (buf len: %d)\n", len(buf)) + err := fmt.Errorf("DecodeSEG6Encap: error parsing Segment List (buf len: %d)", len(buf)) return mode, nil, err } for len(buf) > 0 { diff --git a/agent/vendor/github.com/vishvananda/netlink/nl/syscall.go b/agent/vendor/github.com/vishvananda/netlink/nl/syscall.go index 4a01e6e5959..bdf6ba63957 100644 --- a/agent/vendor/github.com/vishvananda/netlink/nl/syscall.go +++ b/agent/vendor/github.com/vishvananda/netlink/nl/syscall.go @@ -1,6 +1,6 @@ package nl -// syscall package lack of rule atributes type. +// syscall package lack of rule attributes type. 
// Thus there are defined below const ( FRA_UNSPEC = iota diff --git a/agent/vendor/github.com/vishvananda/netlink/nl/tc_linux.go b/agent/vendor/github.com/vishvananda/netlink/nl/tc_linux.go index c24d53eb796..eb05ff1cd1c 100644 --- a/agent/vendor/github.com/vishvananda/netlink/nl/tc_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/nl/tc_linux.go @@ -90,6 +90,7 @@ const ( SizeofTcU32Sel = 0x10 // without keys SizeofTcGen = 0x14 SizeofTcConnmark = SizeofTcGen + 0x04 + SizeofTcCsum = SizeofTcGen + 0x04 SizeofTcMirred = SizeofTcGen + 0x08 SizeofTcTunnelKey = SizeofTcGen + 0x04 SizeofTcSkbEdit = SizeofTcGen @@ -694,6 +695,36 @@ func (x *TcConnmark) Serialize() []byte { return (*(*[SizeofTcConnmark]byte)(unsafe.Pointer(x)))[:] } +const ( + TCA_CSUM_UNSPEC = iota + TCA_CSUM_PARMS + TCA_CSUM_TM + TCA_CSUM_PAD + TCA_CSUM_MAX = TCA_CSUM_PAD +) + +// struct tc_csum { +// tc_gen; +// __u32 update_flags; +// } + +type TcCsum struct { + TcGen + UpdateFlags uint32 +} + +func (msg *TcCsum) Len() int { + return SizeofTcCsum +} + +func DeserializeTcCsum(b []byte) *TcCsum { + return (*TcCsum)(unsafe.Pointer(&b[0:SizeofTcCsum][0])) +} + +func (x *TcCsum) Serialize() []byte { + return (*(*[SizeofTcCsum]byte)(unsafe.Pointer(x)))[:] +} + const ( TCA_ACT_MIRRED = 8 ) @@ -882,6 +913,111 @@ const ( TCA_HFSC_USC ) +const ( + TCA_FLOWER_UNSPEC = iota + TCA_FLOWER_CLASSID + TCA_FLOWER_INDEV + TCA_FLOWER_ACT + TCA_FLOWER_KEY_ETH_DST /* ETH_ALEN */ + TCA_FLOWER_KEY_ETH_DST_MASK /* ETH_ALEN */ + TCA_FLOWER_KEY_ETH_SRC /* ETH_ALEN */ + TCA_FLOWER_KEY_ETH_SRC_MASK /* ETH_ALEN */ + TCA_FLOWER_KEY_ETH_TYPE /* be16 */ + TCA_FLOWER_KEY_IP_PROTO /* u8 */ + TCA_FLOWER_KEY_IPV4_SRC /* be32 */ + TCA_FLOWER_KEY_IPV4_SRC_MASK /* be32 */ + TCA_FLOWER_KEY_IPV4_DST /* be32 */ + TCA_FLOWER_KEY_IPV4_DST_MASK /* be32 */ + TCA_FLOWER_KEY_IPV6_SRC /* struct in6_addr */ + TCA_FLOWER_KEY_IPV6_SRC_MASK /* struct in6_addr */ + TCA_FLOWER_KEY_IPV6_DST /* struct in6_addr */ + TCA_FLOWER_KEY_IPV6_DST_MASK /* struct in6_addr */ + TCA_FLOWER_KEY_TCP_SRC /* be16 */ + TCA_FLOWER_KEY_TCP_DST /* be16 */ + TCA_FLOWER_KEY_UDP_SRC /* be16 */ + TCA_FLOWER_KEY_UDP_DST /* be16 */ + + TCA_FLOWER_FLAGS + TCA_FLOWER_KEY_VLAN_ID /* be16 */ + TCA_FLOWER_KEY_VLAN_PRIO /* u8 */ + TCA_FLOWER_KEY_VLAN_ETH_TYPE /* be16 */ + + TCA_FLOWER_KEY_ENC_KEY_ID /* be32 */ + TCA_FLOWER_KEY_ENC_IPV4_SRC /* be32 */ + TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK /* be32 */ + TCA_FLOWER_KEY_ENC_IPV4_DST /* be32 */ + TCA_FLOWER_KEY_ENC_IPV4_DST_MASK /* be32 */ + TCA_FLOWER_KEY_ENC_IPV6_SRC /* struct in6_addr */ + TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK /* struct in6_addr */ + TCA_FLOWER_KEY_ENC_IPV6_DST /* struct in6_addr */ + TCA_FLOWER_KEY_ENC_IPV6_DST_MASK /* struct in6_addr */ + + TCA_FLOWER_KEY_TCP_SRC_MASK /* be16 */ + TCA_FLOWER_KEY_TCP_DST_MASK /* be16 */ + TCA_FLOWER_KEY_UDP_SRC_MASK /* be16 */ + TCA_FLOWER_KEY_UDP_DST_MASK /* be16 */ + TCA_FLOWER_KEY_SCTP_SRC_MASK /* be16 */ + TCA_FLOWER_KEY_SCTP_DST_MASK /* be16 */ + + TCA_FLOWER_KEY_SCTP_SRC /* be16 */ + TCA_FLOWER_KEY_SCTP_DST /* be16 */ + + TCA_FLOWER_KEY_ENC_UDP_SRC_PORT /* be16 */ + TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK /* be16 */ + TCA_FLOWER_KEY_ENC_UDP_DST_PORT /* be16 */ + TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK /* be16 */ + + TCA_FLOWER_KEY_FLAGS /* be32 */ + TCA_FLOWER_KEY_FLAGS_MASK /* be32 */ + + TCA_FLOWER_KEY_ICMPV4_CODE /* u8 */ + TCA_FLOWER_KEY_ICMPV4_CODE_MASK /* u8 */ + TCA_FLOWER_KEY_ICMPV4_TYPE /* u8 */ + TCA_FLOWER_KEY_ICMPV4_TYPE_MASK /* u8 */ + TCA_FLOWER_KEY_ICMPV6_CODE /* u8 */ + 
TCA_FLOWER_KEY_ICMPV6_CODE_MASK /* u8 */ + TCA_FLOWER_KEY_ICMPV6_TYPE /* u8 */ + TCA_FLOWER_KEY_ICMPV6_TYPE_MASK /* u8 */ + + TCA_FLOWER_KEY_ARP_SIP /* be32 */ + TCA_FLOWER_KEY_ARP_SIP_MASK /* be32 */ + TCA_FLOWER_KEY_ARP_TIP /* be32 */ + TCA_FLOWER_KEY_ARP_TIP_MASK /* be32 */ + TCA_FLOWER_KEY_ARP_OP /* u8 */ + TCA_FLOWER_KEY_ARP_OP_MASK /* u8 */ + TCA_FLOWER_KEY_ARP_SHA /* ETH_ALEN */ + TCA_FLOWER_KEY_ARP_SHA_MASK /* ETH_ALEN */ + TCA_FLOWER_KEY_ARP_THA /* ETH_ALEN */ + TCA_FLOWER_KEY_ARP_THA_MASK /* ETH_ALEN */ + + TCA_FLOWER_KEY_MPLS_TTL /* u8 - 8 bits */ + TCA_FLOWER_KEY_MPLS_BOS /* u8 - 1 bit */ + TCA_FLOWER_KEY_MPLS_TC /* u8 - 3 bits */ + TCA_FLOWER_KEY_MPLS_LABEL /* be32 - 20 bits */ + + TCA_FLOWER_KEY_TCP_FLAGS /* be16 */ + TCA_FLOWER_KEY_TCP_FLAGS_MASK /* be16 */ + + TCA_FLOWER_KEY_IP_TOS /* u8 */ + TCA_FLOWER_KEY_IP_TOS_MASK /* u8 */ + TCA_FLOWER_KEY_IP_TTL /* u8 */ + TCA_FLOWER_KEY_IP_TTL_MASK /* u8 */ + + TCA_FLOWER_KEY_CVLAN_ID /* be16 */ + TCA_FLOWER_KEY_CVLAN_PRIO /* u8 */ + TCA_FLOWER_KEY_CVLAN_ETH_TYPE /* be16 */ + + TCA_FLOWER_KEY_ENC_IP_TOS /* u8 */ + TCA_FLOWER_KEY_ENC_IP_TOS_MASK /* u8 */ + TCA_FLOWER_KEY_ENC_IP_TTL /* u8 */ + TCA_FLOWER_KEY_ENC_IP_TTL_MASK /* u8 */ + + TCA_FLOWER_KEY_ENC_OPTS + TCA_FLOWER_KEY_ENC_OPTS_MASK + + __TCA_FLOWER_MAX +) + // struct tc_sfq_qopt { // unsigned quantum; /* Bytes per round allocated to flow */ // int perturb_period; /* Period of hash perturbation */ diff --git a/agent/vendor/github.com/vishvananda/netlink/proc_event_linux.go b/agent/vendor/github.com/vishvananda/netlink/proc_event_linux.go new file mode 100644 index 00000000000..53bc59a6ecf --- /dev/null +++ b/agent/vendor/github.com/vishvananda/netlink/proc_event_linux.go @@ -0,0 +1,217 @@ +package netlink + +import ( + "bytes" + "encoding/binary" + "fmt" + "os" + "syscall" + + "github.com/vishvananda/netlink/nl" + "github.com/vishvananda/netns" + "golang.org/x/sys/unix" +) + +const CN_IDX_PROC = 0x1 + +const ( + PROC_EVENT_NONE = 0x00000000 + PROC_EVENT_FORK = 0x00000001 + PROC_EVENT_EXEC = 0x00000002 + PROC_EVENT_UID = 0x00000004 + PROC_EVENT_GID = 0x00000040 + PROC_EVENT_SID = 0x00000080 + PROC_EVENT_PTRACE = 0x00000100 + PROC_EVENT_COMM = 0x00000200 + PROC_EVENT_COREDUMP = 0x40000000 + PROC_EVENT_EXIT = 0x80000000 +) + +const ( + CN_VAL_PROC = 0x1 + PROC_CN_MCAST_LISTEN = 0x1 +) + +type ProcEventMsg interface { + Pid() uint32 + Tgid() uint32 +} + +type ProcEventHeader struct { + What uint32 + CPU uint32 + Timestamp uint64 +} + +type ProcEvent struct { + ProcEventHeader + Msg ProcEventMsg +} + +func (pe *ProcEvent) setHeader(h ProcEventHeader) { + pe.What = h.What + pe.CPU = h.CPU + pe.Timestamp = h.Timestamp +} + +type ExitProcEvent struct { + ProcessPid uint32 + ProcessTgid uint32 + ExitCode uint32 + ExitSignal uint32 + ParentPid uint32 + ParentTgid uint32 +} + +type ExitProcEvent2 struct { + ProcessPid uint32 + ProcessTgid uint32 + ExitCode uint32 + ExitSignal uint32 + ParentPid uint32 + ParentTgid uint32 +} + +func (e *ExitProcEvent) Pid() uint32 { + return e.ProcessPid +} + +func (e *ExitProcEvent) Tgid() uint32 { + return e.ProcessTgid +} + +type ExecProcEvent struct { + ProcessPid uint32 + ProcessTgid uint32 +} + +func (e *ExecProcEvent) Pid() uint32 { + return e.ProcessPid +} + +func (e *ExecProcEvent) Tgid() uint32 { + return e.ProcessTgid +} + +type ForkProcEvent struct { + ParentPid uint32 + ParentTgid uint32 + ChildPid uint32 + ChildTgid uint32 +} + +func (e *ForkProcEvent) Pid() uint32 { + return e.ParentPid +} + +func (e *ForkProcEvent) Tgid() uint32 { + return 
e.ParentTgid +} + +type CommProcEvent struct { + ProcessPid uint32 + ProcessTgid uint32 + Comm [16]byte +} + +func (e *CommProcEvent) Pid() uint32 { + return e.ProcessPid +} + +func (e *CommProcEvent) Tgid() uint32 { + return e.ProcessTgid +} + +func ProcEventMonitor(ch chan<- ProcEvent, done <-chan struct{}, errorChan chan<- error) error { + h, err := NewHandle() + if err != nil { + return err + } + defer h.Delete() + + s, err := nl.SubscribeAt(netns.None(), netns.None(), unix.NETLINK_CONNECTOR, CN_IDX_PROC) + if err != nil { + return err + } + + var nlmsg nl.NetlinkRequest + + nlmsg.Pid = uint32(os.Getpid()) + nlmsg.Type = unix.NLMSG_DONE + nlmsg.Len = uint32(unix.SizeofNlMsghdr) + + cm := nl.NewCnMsg(CN_IDX_PROC, CN_VAL_PROC, PROC_CN_MCAST_LISTEN) + nlmsg.AddData(cm) + + s.Send(&nlmsg) + + if done != nil { + go func() { + <-done + s.Close() + }() + } + + go func() { + defer close(ch) + for { + msgs, from, err := s.Receive() + if err != nil { + errorChan <- err + return + } + if from.Pid != nl.PidKernel { + errorChan <- fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel) + return + } + + for _, m := range msgs { + e := parseNetlinkMessage(m) + if e != nil { + ch <- *e + } + } + + } + }() + + return nil +} + +func parseNetlinkMessage(m syscall.NetlinkMessage) *ProcEvent { + if m.Header.Type == unix.NLMSG_DONE { + buf := bytes.NewBuffer(m.Data) + msg := &nl.CnMsg{} + hdr := &ProcEventHeader{} + binary.Read(buf, nl.NativeEndian(), msg) + binary.Read(buf, nl.NativeEndian(), hdr) + + pe := &ProcEvent{} + pe.setHeader(*hdr) + switch hdr.What { + case PROC_EVENT_EXIT: + event := &ExitProcEvent{} + binary.Read(buf, nl.NativeEndian(), event) + pe.Msg = event + return pe + case PROC_EVENT_FORK: + event := &ForkProcEvent{} + binary.Read(buf, nl.NativeEndian(), event) + pe.Msg = event + return pe + case PROC_EVENT_EXEC: + event := &ExecProcEvent{} + binary.Read(buf, nl.NativeEndian(), event) + pe.Msg = event + return pe + case PROC_EVENT_COMM: + event := &CommProcEvent{} + binary.Read(buf, nl.NativeEndian(), event) + pe.Msg = event + return pe + } + return nil + } + + return nil +} diff --git a/agent/vendor/github.com/vishvananda/netlink/qdisc.go b/agent/vendor/github.com/vishvananda/netlink/qdisc.go index 8418569ee77..f594c9c212d 100644 --- a/agent/vendor/github.com/vishvananda/netlink/qdisc.go +++ b/agent/vendor/github.com/vishvananda/netlink/qdisc.go @@ -308,13 +308,15 @@ func (qdisc *Fq) Type() string { // FQ_Codel (Fair Queuing Controlled Delay) is queuing discipline that combines Fair Queuing with the CoDel AQM scheme. 
type FqCodel struct { QdiscAttrs - Target uint32 - Limit uint32 - Interval uint32 - ECN uint32 - Flows uint32 - Quantum uint32 - // There are some more attributes here, but support for them seems not ubiquitous + Target uint32 + Limit uint32 + Interval uint32 + ECN uint32 + Flows uint32 + Quantum uint32 + CEThreshold uint32 + DropBatchSize uint32 + MemoryLimit uint32 } func (fqcodel *FqCodel) String() string { diff --git a/agent/vendor/github.com/vishvananda/netlink/qdisc_linux.go b/agent/vendor/github.com/vishvananda/netlink/qdisc_linux.go index d0e1ca194fb..e182e1cfe67 100644 --- a/agent/vendor/github.com/vishvananda/netlink/qdisc_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/qdisc_linux.go @@ -250,7 +250,15 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { if qdisc.Quantum > 0 { options.AddRtAttr(nl.TCA_FQ_CODEL_QUANTUM, nl.Uint32Attr((uint32(qdisc.Quantum)))) } - + if qdisc.CEThreshold > 0 { + options.AddRtAttr(nl.TCA_FQ_CODEL_CE_THRESHOLD, nl.Uint32Attr(qdisc.CEThreshold)) + } + if qdisc.DropBatchSize > 0 { + options.AddRtAttr(nl.TCA_FQ_CODEL_DROP_BATCH_SIZE, nl.Uint32Attr(qdisc.DropBatchSize)) + } + if qdisc.MemoryLimit > 0 { + options.AddRtAttr(nl.TCA_FQ_CODEL_MEMORY_LIMIT, nl.Uint32Attr(qdisc.MemoryLimit)) + } case *Fq: options.AddRtAttr(nl.TCA_FQ_RATE_ENABLE, nl.Uint32Attr((uint32(qdisc.Pacing)))) @@ -460,7 +468,6 @@ func parsePrioData(qdisc Qdisc, value []byte) error { } func parseHtbData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { - native = nl.NativeEndian() htb := qdisc.(*Htb) for _, datum := range data { switch datum.Attr.Type { @@ -480,7 +487,6 @@ func parseHtbData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { } func parseFqCodelData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { - native = nl.NativeEndian() fqCodel := qdisc.(*FqCodel) for _, datum := range data { @@ -497,6 +503,12 @@ func parseFqCodelData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { fqCodel.Flows = native.Uint32(datum.Value) case nl.TCA_FQ_CODEL_QUANTUM: fqCodel.Quantum = native.Uint32(datum.Value) + case nl.TCA_FQ_CODEL_CE_THRESHOLD: + fqCodel.CEThreshold = native.Uint32(datum.Value) + case nl.TCA_FQ_CODEL_DROP_BATCH_SIZE: + fqCodel.DropBatchSize = native.Uint32(datum.Value) + case nl.TCA_FQ_CODEL_MEMORY_LIMIT: + fqCodel.MemoryLimit = native.Uint32(datum.Value) } } return nil @@ -504,13 +516,11 @@ func parseFqCodelData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { func parseHfscData(qdisc Qdisc, data []byte) error { Hfsc := qdisc.(*Hfsc) - native = nl.NativeEndian() Hfsc.Defcls = native.Uint16(data) return nil } func parseFqData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { - native = nl.NativeEndian() fq := qdisc.(*Fq) for _, datum := range data { switch datum.Attr.Type { @@ -575,7 +585,6 @@ func parseNetemData(qdisc Qdisc, value []byte) error { } func parseTbfData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { - native = nl.NativeEndian() tbf := qdisc.(*Tbf) for _, datum := range data { switch datum.Attr.Type { @@ -697,3 +706,7 @@ func Xmittime(rate uint64, size uint32) uint32 { // https://git.kernel.org/pub/scm/network/iproute2/iproute2.git/tree/tc/tc_core.c#n62 return time2Tick(uint32(TIME_UNITS_PER_SEC * (float64(size) / float64(rate)))) } + +func Xmitsize(rate uint64, ticks uint32) uint32 { + return uint32((float64(rate) * float64(tick2Time(ticks))) / TIME_UNITS_PER_SEC) +} diff --git a/agent/vendor/github.com/vishvananda/netlink/rdma_link_linux.go b/agent/vendor/github.com/vishvananda/netlink/rdma_link_linux.go 
index ff014ca4cc6..036399db6b0 100644 --- a/agent/vendor/github.com/vishvananda/netlink/rdma_link_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/rdma_link_linux.go @@ -278,3 +278,54 @@ func (h *Handle) RdmaLinkSetNsFd(link *RdmaLink, fd uint32) error { return execRdmaSetLink(req) } + +// RdmaLinkDel deletes an rdma link +// +// Similar to: rdma link delete NAME +// REF: https://man7.org/linux/man-pages/man8/rdma-link.8.html +func RdmaLinkDel(name string) error { + return pkgHandle.RdmaLinkDel(name) +} + +// RdmaLinkDel deletes an rdma link. +func (h *Handle) RdmaLinkDel(name string) error { + link, err := h.RdmaLinkByName(name) + if err != nil { + return err + } + + proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_DELLINK) + req := h.newNetlinkRequest(proto, unix.NLM_F_ACK) + + b := make([]byte, 4) + native.PutUint32(b, link.Attrs.Index) + req.AddData(nl.NewRtAttr(nl.RDMA_NLDEV_ATTR_DEV_INDEX, b)) + + _, err = req.Execute(unix.NETLINK_RDMA, 0) + return err +} + +// RdmaLinkAdd adds an rdma link for the specified type to the network device. +// Similar to: rdma link add NAME type TYPE netdev NETDEV +// NAME - specifies the new name of the rdma link to add +// TYPE - specifies which rdma type to use. Link types: +// rxe - Soft RoCE driver +// siw - Soft iWARP driver +// NETDEV - specifies the network device to which the link is bound +// +// REF: https://man7.org/linux/man-pages/man8/rdma-link.8.html +func RdmaLinkAdd(linkName, linkType, netdev string) error { + return pkgHandle.RdmaLinkAdd(linkName, linkType, netdev) +} + +// RdmaLinkAdd adds an rdma link for the specified type to the network device. +func (h *Handle) RdmaLinkAdd(linkName string, linkType string, netdev string) error { + proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_NEWLINK) + req := h.newNetlinkRequest(proto, unix.NLM_F_ACK) + + req.AddData(nl.NewRtAttr(nl.RDMA_NLDEV_ATTR_DEV_NAME, nl.ZeroTerminated(linkName))) + req.AddData(nl.NewRtAttr(nl.RDMA_NLDEV_ATTR_LINK_TYPE, nl.ZeroTerminated(linkType))) + req.AddData(nl.NewRtAttr(nl.RDMA_NLDEV_ATTR_NDEV_NAME, nl.ZeroTerminated(netdev))) + _, err := req.Execute(unix.NETLINK_RDMA, 0) + return err +} diff --git a/agent/vendor/github.com/vishvananda/netlink/route.go b/agent/vendor/github.com/vishvananda/netlink/route.go index b1625417495..79cc218ec81 100644 --- a/agent/vendor/github.com/vishvananda/netlink/route.go +++ b/agent/vendor/github.com/vishvananda/netlink/route.go @@ -11,6 +11,24 @@ type Scope uint8 type NextHopFlag int +const ( + RT_FILTER_PROTOCOL uint64 = 1 << (1 + iota) + RT_FILTER_SCOPE + RT_FILTER_TYPE + RT_FILTER_TOS + RT_FILTER_IIF + RT_FILTER_OIF + RT_FILTER_DST + RT_FILTER_SRC + RT_FILTER_GW + RT_FILTER_TABLE + RT_FILTER_HOPLIMIT + RT_FILTER_PRIORITY + RT_FILTER_MARK + RT_FILTER_MASK + RT_FILTER_REALM +) + type Destination interface { Family() int Decode([]byte) error @@ -27,6 +45,9 @@ type Encap interface { Equal(Encap) bool } +//Protocol describe what was the originator of the route +type RouteProtocol int + // Route represents a netlink route. 
type Route struct { LinkIndex int @@ -36,8 +57,9 @@ type Route struct { Src net.IP Gw net.IP MultiPath []*NexthopInfo - Protocol int + Protocol RouteProtocol Priority int + Family int Table int Type int Tos int @@ -45,6 +67,8 @@ type Route struct { MPLSDst *int NewDst Destination Encap Encap + Via Destination + Realm int MTU int Window int Rtt int @@ -79,6 +103,9 @@ func (r Route) String() string { if r.Encap != nil { elems = append(elems, fmt.Sprintf("Encap: %s", r.Encap)) } + if r.Via != nil { + elems = append(elems, fmt.Sprintf("Via: %s", r.Via)) + } elems = append(elems, fmt.Sprintf("Src: %s", r.Src)) if len(r.MultiPath) > 0 { elems = append(elems, fmt.Sprintf("Gw: %s", r.MultiPath)) @@ -87,6 +114,7 @@ func (r Route) String() string { } elems = append(elems, fmt.Sprintf("Flags: %s", r.ListFlags())) elems = append(elems, fmt.Sprintf("Table: %d", r.Table)) + elems = append(elems, fmt.Sprintf("Realm: %d", r.Realm)) return fmt.Sprintf("{%s}", strings.Join(elems, " ")) } @@ -100,6 +128,7 @@ func (r Route) Equal(x Route) bool { nexthopInfoSlice(r.MultiPath).Equal(x.MultiPath) && r.Protocol == x.Protocol && r.Priority == x.Priority && + r.Realm == x.Realm && r.Table == x.Table && r.Type == x.Type && r.Tos == x.Tos && @@ -107,6 +136,7 @@ func (r Route) Equal(x Route) bool { r.Flags == x.Flags && (r.MPLSDst == x.MPLSDst || (r.MPLSDst != nil && x.MPLSDst != nil && *r.MPLSDst == *x.MPLSDst)) && (r.NewDst == x.NewDst || (r.NewDst != nil && r.NewDst.Equal(x.NewDst))) && + (r.Via == x.Via || (r.Via != nil && r.Via.Equal(x.Via))) && (r.Encap == x.Encap || (r.Encap != nil && r.Encap.Equal(x.Encap))) } @@ -136,6 +166,7 @@ type NexthopInfo struct { Flags int NewDst Destination Encap Encap + Via Destination } func (n *NexthopInfo) String() string { @@ -147,6 +178,9 @@ func (n *NexthopInfo) String() string { if n.Encap != nil { elems = append(elems, fmt.Sprintf("Encap: %s", n.Encap)) } + if n.Via != nil { + elems = append(elems, fmt.Sprintf("Via: %s", n.Via)) + } elems = append(elems, fmt.Sprintf("Weight: %d", n.Hops+1)) elems = append(elems, fmt.Sprintf("Gw: %s", n.Gw)) elems = append(elems, fmt.Sprintf("Flags: %s", n.ListFlags())) diff --git a/agent/vendor/github.com/vishvananda/netlink/route_linux.go b/agent/vendor/github.com/vishvananda/netlink/route_linux.go index 4e778a417b8..8da8866573c 100644 --- a/agent/vendor/github.com/vishvananda/netlink/route_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/route_linux.go @@ -1,8 +1,11 @@ package netlink import ( + "bytes" + "encoding/binary" "fmt" "net" + "strconv" "strings" "syscall" @@ -21,22 +24,23 @@ const ( SCOPE_NOWHERE Scope = unix.RT_SCOPE_NOWHERE ) -const ( - RT_FILTER_PROTOCOL uint64 = 1 << (1 + iota) - RT_FILTER_SCOPE - RT_FILTER_TYPE - RT_FILTER_TOS - RT_FILTER_IIF - RT_FILTER_OIF - RT_FILTER_DST - RT_FILTER_SRC - RT_FILTER_GW - RT_FILTER_TABLE - RT_FILTER_HOPLIMIT - RT_FILTER_PRIORITY - RT_FILTER_MARK - RT_FILTER_MASK -) +func (s Scope) String() string { + switch s { + case SCOPE_UNIVERSE: + return "universe" + case SCOPE_SITE: + return "site" + case SCOPE_LINK: + return "link" + case SCOPE_HOST: + return "host" + case SCOPE_NOWHERE: + return "nowhere" + default: + return "unknown" + } +} + const ( FLAG_ONLINK NextHopFlag = unix.RTNH_F_ONLINK @@ -131,7 +135,6 @@ func (e *MPLSEncap) Decode(buf []byte) error { if len(buf) < 4 { return fmt.Errorf("lack of bytes") } - native := nl.NativeEndian() l := native.Uint16(buf) if len(buf) < int(l) { return fmt.Errorf("lack of bytes") @@ -147,7 +150,6 @@ func (e *MPLSEncap) Decode(buf []byte) error 
{ func (e *MPLSEncap) Encode() ([]byte, error) { s := nl.EncodeMPLSStack(e.Labels...) - native := nl.NativeEndian() hdr := make([]byte, 4) native.PutUint16(hdr, uint16(len(s)+4)) native.PutUint16(hdr[2:], nl.MPLS_IPTUNNEL_DST) @@ -203,7 +205,6 @@ func (e *SEG6Encap) Decode(buf []byte) error { if len(buf) < 4 { return fmt.Errorf("lack of bytes") } - native := nl.NativeEndian() // Get Length(l) & Type(typ) : 2 + 2 bytes l := native.Uint16(buf) if len(buf) < int(l) { @@ -223,7 +224,6 @@ func (e *SEG6Encap) Decode(buf []byte) error { } func (e *SEG6Encap) Encode() ([]byte, error) { s, err := nl.EncodeSEG6Encap(e.Mode, e.Segments) - native := nl.NativeEndian() hdr := make([]byte, 4) native.PutUint16(hdr, uint16(len(s)+4)) native.PutUint16(hdr[2:], nl.SEG6_IPTUNNEL_SRH) @@ -233,7 +233,7 @@ func (e *SEG6Encap) String() string { segs := make([]string, 0, len(e.Segments)) // append segment backwards (from n to 0) since seg#0 is the last segment. for i := len(e.Segments); i > 0; i-- { - segs = append(segs, fmt.Sprintf("%s", e.Segments[i-1])) + segs = append(segs, e.Segments[i-1].String()) } str := fmt.Sprintf("mode %s segs %d [ %s ]", nl.SEG6EncapModeString(e.Mode), len(e.Segments), strings.Join(segs, " ")) @@ -284,7 +284,6 @@ func (e *SEG6LocalEncap) Decode(buf []byte) error { if err != nil { return err } - native := nl.NativeEndian() for _, attr := range attrs { switch attr.Attr.Type { case nl.SEG6_LOCAL_ACTION: @@ -314,7 +313,6 @@ func (e *SEG6LocalEncap) Decode(buf []byte) error { } func (e *SEG6LocalEncap) Encode() ([]byte, error) { var err error - native := nl.NativeEndian() res := make([]byte, 8) native.PutUint16(res, 8) // length native.PutUint16(res[2:], nl.SEG6_LOCAL_ACTION) @@ -405,7 +403,7 @@ func (e *SEG6LocalEncap) String() string { segs := make([]string, 0, len(e.Segments)) //append segment backwards (from n to 0) since seg#0 is the last segment. for i := len(e.Segments); i > 0; i-- { - segs = append(segs, fmt.Sprintf("%s", e.Segments[i-1])) + segs = append(segs, e.Segments[i-1].String()) } strs = append(strs, fmt.Sprintf("segs %d [ %s ]", len(e.Segments), strings.Join(segs, " "))) } @@ -446,6 +444,207 @@ func (e *SEG6LocalEncap) Equal(x Encap) bool { return true } +// Encap BPF definitions +type bpfObj struct { + progFd int + progName string +} +type BpfEncap struct { + progs [nl.LWT_BPF_MAX]bpfObj + headroom int +} + +// SetProg adds a bpf function to the route via netlink RTA_ENCAP. The fd must be a bpf +// program loaded with bpf(type=BPF_PROG_TYPE_LWT_*) matching the direction the program should +// be applied to (LWT_BPF_IN, LWT_BPF_OUT, LWT_BPF_XMIT). +func (e *BpfEncap) SetProg(mode, progFd int, progName string) error { + if progFd <= 0 { + return fmt.Errorf("lwt bpf SetProg: invalid fd") + } + if mode <= nl.LWT_BPF_UNSPEC || mode >= nl.LWT_BPF_XMIT_HEADROOM { + return fmt.Errorf("lwt bpf SetProg:invalid mode") + } + e.progs[mode].progFd = progFd + e.progs[mode].progName = fmt.Sprintf("%s[fd:%d]", progName, progFd) + return nil +} + +// SetXmitHeadroom sets the xmit headroom (LWT_BPF_MAX_HEADROOM) via netlink RTA_ENCAP. +// maximum headroom is LWT_BPF_MAX_HEADROOM +func (e *BpfEncap) SetXmitHeadroom(headroom int) error { + if headroom > nl.LWT_BPF_MAX_HEADROOM || headroom < 0 { + return fmt.Errorf("invalid headroom size. 
range is 0 - %d", nl.LWT_BPF_MAX_HEADROOM) + } + e.headroom = headroom + return nil +} + +func (e *BpfEncap) Type() int { + return nl.LWTUNNEL_ENCAP_BPF +} +func (e *BpfEncap) Decode(buf []byte) error { + if len(buf) < 4 { + return fmt.Errorf("lwt bpf decode: lack of bytes") + } + native := nl.NativeEndian() + attrs, err := nl.ParseRouteAttr(buf) + if err != nil { + return fmt.Errorf("lwt bpf decode: failed parsing attribute. err: %v", err) + } + for _, attr := range attrs { + if int(attr.Attr.Type) < 1 { + // nl.LWT_BPF_UNSPEC + continue + } + if int(attr.Attr.Type) > nl.LWT_BPF_MAX { + return fmt.Errorf("lwt bpf decode: received unknown attribute type: %d", attr.Attr.Type) + } + switch int(attr.Attr.Type) { + case nl.LWT_BPF_MAX_HEADROOM: + e.headroom = int(native.Uint32(attr.Value)) + default: + bpfO := bpfObj{} + parsedAttrs, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return fmt.Errorf("lwt bpf decode: failed parsing route attribute") + } + for _, parsedAttr := range parsedAttrs { + switch int(parsedAttr.Attr.Type) { + case nl.LWT_BPF_PROG_FD: + bpfO.progFd = int(native.Uint32(parsedAttr.Value)) + case nl.LWT_BPF_PROG_NAME: + bpfO.progName = string(parsedAttr.Value) + default: + return fmt.Errorf("lwt bpf decode: received unknown attribute: type: %d, len: %d", parsedAttr.Attr.Type, parsedAttr.Attr.Len) + } + } + e.progs[attr.Attr.Type] = bpfO + } + } + return nil +} + +func (e *BpfEncap) Encode() ([]byte, error) { + buf := make([]byte, 0) + native = nl.NativeEndian() + for index, attr := range e.progs { + nlMsg := nl.NewRtAttr(index, []byte{}) + if attr.progFd != 0 { + nlMsg.AddRtAttr(nl.LWT_BPF_PROG_FD, nl.Uint32Attr(uint32(attr.progFd))) + } + if attr.progName != "" { + nlMsg.AddRtAttr(nl.LWT_BPF_PROG_NAME, nl.ZeroTerminated(attr.progName)) + } + if nlMsg.Len() > 4 { + buf = append(buf, nlMsg.Serialize()...) + } + } + if len(buf) <= 4 { + return nil, fmt.Errorf("lwt bpf encode: bpf obj definitions returned empty buffer") + } + if e.headroom > 0 { + hRoom := nl.NewRtAttr(nl.LWT_BPF_XMIT_HEADROOM, nl.Uint32Attr(uint32(e.headroom))) + buf = append(buf, hRoom.Serialize()...) 
+ } + return buf, nil +} + +func (e *BpfEncap) String() string { + progs := make([]string, 0) + for index, obj := range e.progs { + empty := bpfObj{} + switch index { + case nl.LWT_BPF_IN: + if obj != empty { + progs = append(progs, fmt.Sprintf("in: %s", obj.progName)) + } + case nl.LWT_BPF_OUT: + if obj != empty { + progs = append(progs, fmt.Sprintf("out: %s", obj.progName)) + } + case nl.LWT_BPF_XMIT: + if obj != empty { + progs = append(progs, fmt.Sprintf("xmit: %s", obj.progName)) + } + } + } + if e.headroom > 0 { + progs = append(progs, fmt.Sprintf("xmit headroom: %d", e.headroom)) + } + return strings.Join(progs, " ") +} + +func (e *BpfEncap) Equal(x Encap) bool { + o, ok := x.(*BpfEncap) + if !ok { + return false + } + if e.headroom != o.headroom { + return false + } + for i := range o.progs { + if o.progs[i] != e.progs[i] { + return false + } + } + return true +} + +type Via struct { + AddrFamily int + Addr net.IP +} + +func (v *Via) Equal(x Destination) bool { + o, ok := x.(*Via) + if !ok { + return false + } + if v.AddrFamily == x.Family() && v.Addr.Equal(o.Addr) { + return true + } + return false +} + +func (v *Via) String() string { + return fmt.Sprintf("Family: %d, Address: %s", v.AddrFamily, v.Addr.String()) +} + +func (v *Via) Family() int { + return v.AddrFamily +} + +func (v *Via) Encode() ([]byte, error) { + buf := &bytes.Buffer{} + err := binary.Write(buf, native, uint16(v.AddrFamily)) + if err != nil { + return nil, err + } + err = binary.Write(buf, native, v.Addr) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (v *Via) Decode(b []byte) error { + if len(b) < 6 { + return fmt.Errorf("decoding failed: buffer too small (%d bytes)", len(b)) + } + v.AddrFamily = int(native.Uint16(b[0:2])) + if v.AddrFamily == nl.FAMILY_V4 { + v.Addr = net.IP(b[2:6]) + return nil + } else if v.AddrFamily == nl.FAMILY_V6 { + if len(b) < 18 { + return fmt.Errorf("decoding failed: buffer too small (%d bytes)", len(b)) + } + v.Addr = net.IP(b[2:]) + return nil + } + return fmt.Errorf("decoding failed: address family %d unknown", v.AddrFamily) +} + // RouteAdd will add a route to the system. // Equivalent to: `ip route add $route` func RouteAdd(route *Route) error { @@ -460,6 +659,32 @@ func (h *Handle) RouteAdd(route *Route) error { return h.routeHandle(route, req, nl.NewRtMsg()) } +// RouteAppend will append a route to the system. +// Equivalent to: `ip route append $route` +func RouteAppend(route *Route) error { + return pkgHandle.RouteAppend(route) +} + +// RouteAppend will append a route to the system. +// Equivalent to: `ip route append $route` +func (h *Handle) RouteAppend(route *Route) error { + flags := unix.NLM_F_CREATE | unix.NLM_F_APPEND | unix.NLM_F_ACK + req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) + return h.routeHandle(route, req, nl.NewRtMsg()) +} + +// RouteAddEcmp will add a route to the system. +func RouteAddEcmp(route *Route) error { + return pkgHandle.RouteAddEcmp(route) +} + +// RouteAddEcmp will add a route to the system. +func (h *Handle) RouteAddEcmp(route *Route) error { + flags := unix.NLM_F_CREATE | unix.NLM_F_ACK + req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) + return h.routeHandle(route, req, nl.NewRtMsg()) +} + // RouteReplace will add a route to the system. 
// Equivalent to: `ip route replace $route` func RouteReplace(route *Route) error { @@ -533,7 +758,13 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg if err != nil { return err } - rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_ENCAP, buf)) + switch route.Encap.Type() { + case nl.LWTUNNEL_ENCAP_BPF: + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_ENCAP|unix.NLA_F_NESTED, buf)) + default: + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_ENCAP, buf)) + } + } if route.Src != nil { @@ -567,6 +798,14 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_GATEWAY, gwData)) } + if route.Via != nil { + buf, err := route.Via.Encode() + if err != nil { + return fmt.Errorf("failed to encode RTA_VIA: %v", err) + } + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_VIA, buf)) + } + if len(route.MultiPath) > 0 { buf := []byte{} for _, nh := range route.MultiPath { @@ -609,6 +848,13 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg } children = append(children, nl.NewRtAttr(unix.RTA_ENCAP, buf)) } + if nh.Via != nil { + buf, err := nh.Via.Encode() + if err != nil { + return err + } + children = append(children, nl.NewRtAttr(unix.RTA_VIA, buf)) + } rtnh.Children = children buf = append(buf, rtnh.Serialize()...) } @@ -631,6 +877,11 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg native.PutUint32(b, uint32(route.Priority)) rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_PRIORITY, b)) } + if route.Realm > 0 { + b := make([]byte, 4) + native.PutUint32(b, uint32(route.Realm)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_FLOW, b)) + } if route.Tos > 0 { msg.Tos = uint8(route.Tos) } @@ -723,10 +974,7 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg req.AddData(attr) } - var ( - b = make([]byte, 4) - native = nl.NativeEndian() - ) + b := make([]byte, 4) native.PutUint32(b, uint32(route.LinkIndex)) req.AddData(nl.NewRtAttr(unix.RTA_OIF, b)) @@ -765,8 +1013,9 @@ func RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, e // All rules must be defined in RouteFilter struct func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) { req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_DUMP) - infmsg := nl.NewIfInfomsg(family) - req.AddData(infmsg) + rtmsg := nl.NewRtMsg() + rtmsg.Family = uint8(family) + req.AddData(rtmsg) msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWROUTE) if err != nil { @@ -802,6 +1051,8 @@ func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) continue case filterMask&RT_FILTER_TOS != 0 && route.Tos != filter.Tos: continue + case filterMask&RT_FILTER_REALM != 0 && route.Realm != filter.Realm: + continue case filterMask&RT_FILTER_OIF != 0 && route.LinkIndex != filter.LinkIndex: continue case filterMask&RT_FILTER_IIF != 0 && route.ILinkIndex != filter.ILinkIndex: @@ -834,14 +1085,14 @@ func deserializeRoute(m []byte) (Route, error) { } route := Route{ Scope: Scope(msg.Scope), - Protocol: int(msg.Protocol), + Protocol: RouteProtocol(int(msg.Protocol)), Table: int(msg.Table), Type: int(msg.Type), Tos: int(msg.Tos), Flags: int(msg.Flags), + Family: int(msg.Family), } - native := nl.NativeEndian() var encap, encapType syscall.NetlinkRouteAttr for _, attr := range attrs { switch attr.Attr.Type { @@ -868,6 +1119,8 @@ func deserializeRoute(m []byte) (Route, error) { route.ILinkIndex 
= int(native.Uint32(attr.Value[0:4])) case unix.RTA_PRIORITY: route.Priority = int(native.Uint32(attr.Value[0:4])) + case unix.RTA_FLOW: + route.Realm = int(native.Uint32(attr.Value[0:4])) case unix.RTA_TABLE: route.Table = int(native.Uint32(attr.Value[0:4])) case unix.RTA_MULTIPATH: @@ -907,6 +1160,12 @@ func deserializeRoute(m []byte) (Route, error) { encapType = attr case unix.RTA_ENCAP: encap = attr + case unix.RTA_VIA: + d := &Via{} + if err := d.Decode(attr.Value); err != nil { + return nil, nil, err + } + info.Via = d } } @@ -944,6 +1203,12 @@ func deserializeRoute(m []byte) (Route, error) { return route, err } route.NewDst = d + case unix.RTA_VIA: + v := &Via{} + if err := v.Decode(attr.Value); err != nil { + return route, err + } + route.Via = v case unix.RTA_ENCAP_TYPE: encapType = attr case unix.RTA_ENCAP: @@ -1011,6 +1276,11 @@ func deserializeRoute(m []byte) (Route, error) { if err := e.Decode(encap.Value); err != nil { return route, err } + case nl.LWTUNNEL_ENCAP_BPF: + e = &BpfEncap{} + if err := e.Decode(encap.Value); err != nil { + return route, err + } } route.Encap = e } @@ -1021,7 +1291,10 @@ func deserializeRoute(m []byte) (Route, error) { // RouteGetOptions contains a set of options to use with // RouteGetWithOptions type RouteGetOptions struct { + Iif string + Oif string VrfName string + SrcAddr net.IP } // RouteGetWithOptions gets a route to a specific destination from the host system. @@ -1053,23 +1326,61 @@ func (h *Handle) RouteGetWithOptions(destination net.IP, options *RouteGetOption msg := &nl.RtMsg{} msg.Family = uint8(family) msg.Dst_len = bitlen + if options != nil && options.SrcAddr != nil { + msg.Src_len = bitlen + } + msg.Flags = unix.RTM_F_LOOKUP_TABLE req.AddData(msg) rtaDst := nl.NewRtAttr(unix.RTA_DST, destinationData) req.AddData(rtaDst) if options != nil { - link, err := LinkByName(options.VrfName) - if err != nil { - return nil, err + if options.VrfName != "" { + link, err := LinkByName(options.VrfName) + if err != nil { + return nil, err + } + b := make([]byte, 4) + native.PutUint32(b, uint32(link.Attrs().Index)) + + req.AddData(nl.NewRtAttr(unix.RTA_OIF, b)) } - var ( - b = make([]byte, 4) - native = nl.NativeEndian() - ) - native.PutUint32(b, uint32(link.Attrs().Index)) - req.AddData(nl.NewRtAttr(unix.RTA_OIF, b)) + if len(options.Iif) > 0 { + link, err := LinkByName(options.Iif) + if err != nil { + return nil, err + } + + b := make([]byte, 4) + native.PutUint32(b, uint32(link.Attrs().Index)) + + req.AddData(nl.NewRtAttr(unix.RTA_IIF, b)) + } + + if len(options.Oif) > 0 { + link, err := LinkByName(options.Oif) + if err != nil { + return nil, err + } + + b := make([]byte, 4) + native.PutUint32(b, uint32(link.Attrs().Index)) + + req.AddData(nl.NewRtAttr(unix.RTA_OIF, b)) + } + + if options.SrcAddr != nil { + var srcAddr []byte + if family == FAMILY_V4 { + srcAddr = options.SrcAddr.To4() + } else { + srcAddr = options.SrcAddr.To16() + } + + req.AddData(nl.NewRtAttr(unix.RTA_SRC, srcAddr)) + } } msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWROUTE) @@ -1151,7 +1462,8 @@ func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done < msgs, from, err := s.Receive() if err != nil { if cberr != nil { - cberr(err) + cberr(fmt.Errorf("Receive failed: %v", + err)) } return } @@ -1166,22 +1478,22 @@ func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done < continue } if m.Header.Type == unix.NLMSG_ERROR { - native := nl.NativeEndian() error := int32(native.Uint32(m.Data[0:4])) if error == 0 { continue } if 
cberr != nil { - cberr(syscall.Errno(-error)) + cberr(fmt.Errorf("error message: %v", + syscall.Errno(-error))) } - return + continue } route, err := deserializeRoute(m.Data) if err != nil { if cberr != nil { cberr(err) } - return + continue } ch <- RouteUpdate{Type: m.Header.Type, Route: route} } @@ -1190,3 +1502,54 @@ func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done < return nil } + +func (p RouteProtocol) String() string { + switch int(p) { + case unix.RTPROT_BABEL: + return "babel" + case unix.RTPROT_BGP: + return "bgp" + case unix.RTPROT_BIRD: + return "bird" + case unix.RTPROT_BOOT: + return "boot" + case unix.RTPROT_DHCP: + return "dhcp" + case unix.RTPROT_DNROUTED: + return "dnrouted" + case unix.RTPROT_EIGRP: + return "eigrp" + case unix.RTPROT_GATED: + return "gated" + case unix.RTPROT_ISIS: + return "isis" + //case unix.RTPROT_KEEPALIVED: + // return "keepalived" + case unix.RTPROT_KERNEL: + return "kernel" + case unix.RTPROT_MROUTED: + return "mrouted" + case unix.RTPROT_MRT: + return "mrt" + case unix.RTPROT_NTK: + return "ntk" + case unix.RTPROT_OSPF: + return "ospf" + case unix.RTPROT_RA: + return "ra" + case unix.RTPROT_REDIRECT: + return "redirect" + case unix.RTPROT_RIP: + return "rip" + case unix.RTPROT_STATIC: + return "static" + case unix.RTPROT_UNSPEC: + return "unspec" + case unix.RTPROT_XORP: + return "xorp" + case unix.RTPROT_ZEBRA: + return "zebra" + default: + return strconv.Itoa(int(p)) + } +} diff --git a/agent/vendor/github.com/vishvananda/netlink/route_unspecified.go b/agent/vendor/github.com/vishvananda/netlink/route_unspecified.go index 2701862b4b9..db737268958 100644 --- a/agent/vendor/github.com/vishvananda/netlink/route_unspecified.go +++ b/agent/vendor/github.com/vishvananda/netlink/route_unspecified.go @@ -2,6 +2,8 @@ package netlink +import "strconv" + func (r *Route) ListFlags() []string { return []string{} } @@ -9,3 +11,11 @@ func (r *Route) ListFlags() []string { func (n *NexthopInfo) ListFlags() []string { return []string{} } + +func (s Scope) String() string { + return "unknown" +} + +func (p RouteProtocol) String() string { + return strconv.Itoa(int(p)) +} diff --git a/agent/vendor/github.com/vishvananda/netlink/rule.go b/agent/vendor/github.com/vishvananda/netlink/rule.go index 95f2facfb14..53cd3d4f6ac 100644 --- a/agent/vendor/github.com/vishvananda/netlink/rule.go +++ b/agent/vendor/github.com/vishvananda/netlink/rule.go @@ -25,10 +25,22 @@ type Rule struct { Invert bool Dport *RulePortRange Sport *RulePortRange + IPProto int } func (r Rule) String() string { - return fmt.Sprintf("ip rule %d: from %s table %d", r.Priority, r.Src, r.Table) + from := "all" + if r.Src != nil && r.Src.String() != "" { + from = r.Src.String() + } + + to := "all" + if r.Dst != nil && r.Dst.String() != "" { + to = r.Dst.String() + } + + return fmt.Sprintf("ip rule %d: from %s to %s table %d", + r.Priority, from, to, r.Table) } // NewRule return empty rules. 
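The netlink changes above introduce, among other things, RTA_VIA ("Via") and realm ("Realm"/RTA_FLOW) support on routes, a RouteAppend helper, extra RouteGetOptions fields (Iif, Oif, SrcAddr), and a String method on RouteProtocol. Below is a minimal consumer-side sketch (not part of the patch) of how code importing this vendored package might exercise those additions; the interface name "eth0" and all addresses are hypothetical placeholders.

package main

import (
	"fmt"
	"net"

	"github.com/vishvananda/netlink"
	"golang.org/x/sys/unix"
)

func main() {
	// Resolve the (hypothetical) outgoing interface.
	link, err := netlink.LinkByName("eth0")
	if err != nil {
		fmt.Println("link lookup failed:", err)
		return
	}

	_, dst, _ := net.ParseCIDR("192.0.2.0/24") // example prefix

	// An IPv4 route whose nexthop is reached via an IPv6 gateway (RTA_VIA),
	// tagged with realm 42 (RTA_FLOW) and marked as a static route.
	route := &netlink.Route{
		LinkIndex: link.Attrs().Index,
		Dst:       dst,
		Via:       &netlink.Via{AddrFamily: netlink.FAMILY_V6, Addr: net.ParseIP("2001:db8::1")},
		Realm:     42,
		Protocol:  unix.RTPROT_STATIC,
	}

	// Equivalent to `ip route append ...`, using the new helper.
	if err := netlink.RouteAppend(route); err != nil {
		fmt.Println("route append failed:", err)
		return
	}

	// Route lookup with the extended RouteGetOptions (output interface and
	// source address hints).
	routes, err := netlink.RouteGetWithOptions(net.ParseIP("192.0.2.10"),
		&netlink.RouteGetOptions{Oif: "eth0", SrcAddr: net.ParseIP("198.51.100.1")})
	if err != nil {
		fmt.Println("route get failed:", err)
		return
	}
	for _, r := range routes {
		// RouteProtocol now implements fmt.Stringer ("static", "kernel", ...).
		fmt.Printf("proto=%s route=%s\n", r.Protocol, r)
	}
}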
diff --git a/agent/vendor/github.com/vishvananda/netlink/rule_linux.go b/agent/vendor/github.com/vishvananda/netlink/rule_linux.go index 40474f30eb9..3ae2138808e 100644 --- a/agent/vendor/github.com/vishvananda/netlink/rule_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/rule_linux.go @@ -97,8 +97,6 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { req.AddData(rtAttrs[i]) } - native := nl.NativeEndian() - if rule.Priority >= 0 { b := make([]byte, 4) native.PutUint32(b, uint32(rule.Priority)) @@ -154,6 +152,12 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { req.AddData(nl.NewRtAttr(nl.FRA_GOTO, b)) } + if rule.IPProto > 0 { + b := make([]byte, 4) + native.PutUint32(b, uint32(rule.IPProto)) + req.AddData(nl.NewRtAttr(nl.FRA_IP_PROTO, b)) + } + if rule.Dport != nil { b := rule.Dport.toRtAttrData() req.AddData(nl.NewRtAttr(nl.FRA_DPORT_RANGE, b)) @@ -199,7 +203,6 @@ func (h *Handle) RuleListFiltered(family int, filter *Rule, filterMask uint64) ( return nil, err } - native := nl.NativeEndian() var res = make([]Rule, 0) for i := range msgs { msg := nl.DeserializeRtMsg(msgs[i]) @@ -232,7 +235,7 @@ func (h *Handle) RuleListFiltered(family int, filter *Rule, filterMask uint64) ( case nl.FRA_FWMASK: rule.Mask = int(native.Uint32(attrs[j].Value[0:4])) case nl.FRA_TUN_ID: - rule.TunID = uint(native.Uint64(attrs[j].Value[0:4])) + rule.TunID = uint(native.Uint64(attrs[j].Value[0:8])) case nl.FRA_IIFNAME: rule.IifName = string(attrs[j].Value[:len(attrs[j].Value)-1]) case nl.FRA_OIFNAME: @@ -253,6 +256,8 @@ func (h *Handle) RuleListFiltered(family int, filter *Rule, filterMask uint64) ( rule.Goto = int(native.Uint32(attrs[j].Value[0:4])) case nl.FRA_PRIORITY: rule.Priority = int(native.Uint32(attrs[j].Value[0:4])) + case nl.FRA_IP_PROTO: + rule.IPProto = int(native.Uint32(attrs[j].Value[0:4])) case nl.FRA_DPORT_RANGE: rule.Dport = NewRulePortRange(native.Uint16(attrs[j].Value[0:2]), native.Uint16(attrs[j].Value[2:4])) case nl.FRA_SPORT_RANGE: diff --git a/agent/vendor/github.com/vishvananda/netlink/socket_linux.go b/agent/vendor/github.com/vishvananda/netlink/socket_linux.go index e4e7f7ac3a8..b881fe496dd 100644 --- a/agent/vendor/github.com/vishvananda/netlink/socket_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/socket_linux.go @@ -172,35 +172,78 @@ func SocketGet(local, remote net.Addr) (*Socket, error) { return sock, nil } -// SocketDiagTCPInfo requests INET_DIAG_INFO for TCP protocol for specified family type. +// SocketDiagTCPInfo requests INET_DIAG_INFO for TCP protocol for specified family type and return with extension TCP info. func SocketDiagTCPInfo(family uint8) ([]*InetDiagTCPInfoResp, error) { - s, err := nl.Subscribe(unix.NETLINK_INET_DIAG) + var result []*InetDiagTCPInfoResp + err := socketDiagTCPExecutor(family, func(m syscall.NetlinkMessage) error { + sockInfo := &Socket{} + if err := sockInfo.deserialize(m.Data); err != nil { + return err + } + attrs, err := nl.ParseRouteAttr(m.Data[sizeofSocket:]) + if err != nil { + return err + } + + res, err := attrsToInetDiagTCPInfoResp(attrs, sockInfo) + if err != nil { + return err + } + + result = append(result, res) + return nil + }) + if err != nil { + return nil, err + } + return result, nil +} + +// SocketDiagTCP requests INET_DIAG_INFO for TCP protocol for specified family type and return related socket. 
+func SocketDiagTCP(family uint8) ([]*Socket, error) { + var result []*Socket + err := socketDiagTCPExecutor(family, func(m syscall.NetlinkMessage) error { + sockInfo := &Socket{} + if err := sockInfo.deserialize(m.Data); err != nil { + return err + } + result = append(result, sockInfo) + return nil + }) if err != nil { return nil, err } + return result, nil +} + +// socketDiagTCPExecutor requests INET_DIAG_INFO for TCP protocol for specified family type. +func socketDiagTCPExecutor(family uint8, receiver func(syscall.NetlinkMessage) error) error { + s, err := nl.Subscribe(unix.NETLINK_INET_DIAG) + if err != nil { + return err + } defer s.Close() req := nl.NewNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) req.AddData(&socketRequest{ Family: family, Protocol: unix.IPPROTO_TCP, - Ext: INET_DIAG_INFO, + Ext: (1 << (INET_DIAG_VEGASINFO - 1)) | (1 << (INET_DIAG_INFO - 1)), States: uint32(0xfff), // All TCP states }) s.Send(req) - var result []*InetDiagTCPInfoResp loop: for { msgs, from, err := s.Receive() if err != nil { - return nil, err + return err } if from.Pid != nl.PidKernel { - return nil, fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel) + return fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel) } if len(msgs) == 0 { - return nil, errors.New("no message nor error from netlink") + return errors.New("no message nor error from netlink") } for _, m := range msgs { @@ -208,31 +251,41 @@ loop: case unix.NLMSG_DONE: break loop case unix.NLMSG_ERROR: - native := nl.NativeEndian() error := int32(native.Uint32(m.Data[0:4])) - return nil, syscall.Errno(-error) + return syscall.Errno(-error) } - sockInfo := &Socket{} - if err := sockInfo.deserialize(m.Data); err != nil { - return nil, err + if err := receiver(m); err != nil { + return err } - attrs, err := nl.ParseRouteAttr(m.Data[sizeofSocket:]) - if err != nil { + } + } + return nil +} + +func attrsToInetDiagTCPInfoResp(attrs []syscall.NetlinkRouteAttr, sockInfo *Socket) (*InetDiagTCPInfoResp, error) { + var tcpInfo *TCPInfo + var tcpBBRInfo *TCPBBRInfo + for _, a := range attrs { + if a.Attr.Type == INET_DIAG_INFO { + tcpInfo = &TCPInfo{} + if err := tcpInfo.deserialize(a.Value); err != nil { return nil, err } - var tcpInfo *TCPInfo - for _, a := range attrs { - if a.Attr.Type == INET_DIAG_INFO { - tcpInfo = &TCPInfo{} - if err := tcpInfo.deserialize(a.Value); err != nil { - return nil, err - } - break - } + continue + } + + if a.Attr.Type == INET_DIAG_BBRINFO { + tcpBBRInfo = &TCPBBRInfo{} + if err := tcpBBRInfo.deserialize(a.Value); err != nil { + return nil, err } - r := &InetDiagTCPInfoResp{InetDiagMsg: sockInfo, TCPInfo: tcpInfo} - result = append(result, r) + continue } } - return result, nil + + return &InetDiagTCPInfoResp{ + InetDiagMsg: sockInfo, + TCPInfo: tcpInfo, + TCPBBRInfo: tcpBBRInfo, + }, nil } diff --git a/agent/vendor/github.com/vishvananda/netlink/tcp.go b/agent/vendor/github.com/vishvananda/netlink/tcp.go index 4a42ee5a60e..23ca014d43b 100644 --- a/agent/vendor/github.com/vishvananda/netlink/tcp.go +++ b/agent/vendor/github.com/vishvananda/netlink/tcp.go @@ -16,3 +16,69 @@ const ( TCP_NEW_SYN_REC TCP_MAX_STATES ) + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Snd_wscale uint8 // no uint4 + Rcv_wscale uint8 + Delivery_rate_app_limited uint8 + Fastopen_client_fail uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + 
Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 + Pacing_rate uint64 + Max_pacing_rate uint64 + Bytes_acked uint64 /* RFC4898 tcpEStatsAppHCThruOctetsAcked */ + Bytes_received uint64 /* RFC4898 tcpEStatsAppHCThruOctetsReceived */ + Segs_out uint32 /* RFC4898 tcpEStatsPerfSegsOut */ + Segs_in uint32 /* RFC4898 tcpEStatsPerfSegsIn */ + Notsent_bytes uint32 + Min_rtt uint32 + Data_segs_in uint32 /* RFC4898 tcpEStatsDataSegsIn */ + Data_segs_out uint32 /* RFC4898 tcpEStatsDataSegsOut */ + Delivery_rate uint64 + Busy_time uint64 /* Time (usec) busy sending data */ + Rwnd_limited uint64 /* Time (usec) limited by receive window */ + Sndbuf_limited uint64 /* Time (usec) limited by send buffer */ + Delivered uint32 + Delivered_ce uint32 + Bytes_sent uint64 /* RFC4898 tcpEStatsPerfHCDataOctetsOut */ + Bytes_retrans uint64 /* RFC4898 tcpEStatsPerfOctetsRetrans */ + Dsack_dups uint32 /* RFC4898 tcpEStatsStackDSACKDups */ + Reord_seen uint32 /* reordering events seen */ + Rcv_ooopack uint32 /* Out-of-order packets received */ + Snd_wnd uint32 /* peer's advertised receive window after * scaling (bytes) */ +} + +type TCPBBRInfo struct { + BBRBW uint64 + BBRMinRTT uint32 + BBRPacingGain uint32 + BBRCwndGain uint32 +} diff --git a/agent/vendor/github.com/vishvananda/netlink/tcp_linux.go b/agent/vendor/github.com/vishvananda/netlink/tcp_linux.go index 741ea165584..293858738d8 100644 --- a/agent/vendor/github.com/vishvananda/netlink/tcp_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/tcp_linux.go @@ -2,67 +2,13 @@ package netlink import ( "bytes" + "errors" "io" ) -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Snd_wscale uint8 // no uint4 - Rcv_wscale uint8 - Delivery_rate_app_limited uint8 - Fastopen_client_fail uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 - Pacing_rate uint64 - Max_pacing_rate uint64 - Bytes_acked uint64 /* RFC4898 tcpEStatsAppHCThruOctetsAcked */ - Bytes_received uint64 /* RFC4898 tcpEStatsAppHCThruOctetsReceived */ - Segs_out uint32 /* RFC4898 tcpEStatsPerfSegsOut */ - Segs_in uint32 /* RFC4898 tcpEStatsPerfSegsIn */ - Notsent_bytes uint32 - Min_rtt uint32 - Data_segs_in uint32 /* RFC4898 tcpEStatsDataSegsIn */ - Data_segs_out uint32 /* RFC4898 tcpEStatsDataSegsOut */ - Delivery_rate uint64 - Busy_time uint64 /* Time (usec) busy sending data */ - Rwnd_limited uint64 /* Time (usec) limited by receive window */ - Sndbuf_limited uint64 /* Time (usec) limited by send buffer */ - Delivered uint32 - Delivered_ce uint32 - Bytes_sent uint64 /* RFC4898 tcpEStatsPerfHCDataOctetsOut */ - Bytes_retrans uint64 /* RFC4898 tcpEStatsPerfOctetsRetrans */ - Dsack_dups uint32 /* RFC4898 tcpEStatsStackDSACKDups */ - Reord_seen uint32 /* reordering events seen */ - Rcv_ooopack uint32 /* Out-of-order packets received */ - Snd_wnd uint32 /* peer's advertised receive window after * 
scaling (bytes) */ -} +const ( + tcpBBRInfoLen = 20 +) func checkDeserErr(err error) error { if err == io.EOF { @@ -391,3 +337,17 @@ func (t *TCPInfo) deserialize(b []byte) error { t.Snd_wnd = native.Uint32(next) return nil } + +func (t *TCPBBRInfo) deserialize(b []byte) error { + if len(b) != tcpBBRInfoLen { + return errors.New("Invalid length") + } + + rb := bytes.NewBuffer(b) + t.BBRBW = native.Uint64(rb.Next(8)) + t.BBRMinRTT = native.Uint32(rb.Next(4)) + t.BBRPacingGain = native.Uint32(rb.Next(4)) + t.BBRCwndGain = native.Uint32(rb.Next(4)) + + return nil +} diff --git a/agent/vendor/github.com/vishvananda/netlink/xfrm_policy.go b/agent/vendor/github.com/vishvananda/netlink/xfrm_policy.go index 6219d277258..b7532b092cb 100644 --- a/agent/vendor/github.com/vishvananda/netlink/xfrm_policy.go +++ b/agent/vendor/github.com/vishvananda/netlink/xfrm_policy.go @@ -58,12 +58,13 @@ func (a PolicyAction) String() string { // policy. These rules are matched with XfrmState to determine encryption // and authentication algorithms. type XfrmPolicyTmpl struct { - Dst net.IP - Src net.IP - Proto Proto - Mode Mode - Spi int - Reqid int + Dst net.IP + Src net.IP + Proto Proto + Mode Mode + Spi int + Reqid int + Optional int } func (t XfrmPolicyTmpl) String() string { diff --git a/agent/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go b/agent/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go index a4e132ef55f..35849680413 100644 --- a/agent/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go @@ -79,6 +79,7 @@ func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error { userTmpl.XfrmId.Spi = nl.Swap32(uint32(tmpl.Spi)) userTmpl.Mode = uint8(tmpl.Mode) userTmpl.Reqid = uint32(tmpl.Reqid) + userTmpl.Optional = uint8(tmpl.Optional) userTmpl.Aalgos = ^uint32(0) userTmpl.Ealgos = ^uint32(0) userTmpl.Calgos = ^uint32(0) @@ -92,8 +93,10 @@ func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error { req.AddData(out) } - ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(policy.Ifid))) - req.AddData(ifId) + if policy.Ifid != 0 { + ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(policy.Ifid))) + req.AddData(ifId) + } _, err := req.Execute(unix.NETLINK_XFRM, 0) return err @@ -188,8 +191,10 @@ func (h *Handle) xfrmPolicyGetOrDelete(policy *XfrmPolicy, nlProto int) (*XfrmPo req.AddData(out) } - ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(policy.Ifid))) - req.AddData(ifId) + if policy.Ifid != 0 { + ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(policy.Ifid))) + req.AddData(ifId) + } resType := nl.XFRM_MSG_NEWPOLICY if nlProto == nl.XFRM_MSG_DELPOLICY { @@ -247,6 +252,7 @@ func parseXfrmPolicy(m []byte, family int) (*XfrmPolicy, error) { resTmpl.Mode = Mode(tmpl.Mode) resTmpl.Spi = int(nl.Swap32(tmpl.XfrmId.Spi)) resTmpl.Reqid = int(tmpl.Reqid) + resTmpl.Optional = int(tmpl.Optional) policy.Tmpls = append(policy.Tmpls, resTmpl) } case nl.XFRMA_MARK: diff --git a/agent/vendor/github.com/vishvananda/netlink/xfrm_state.go b/agent/vendor/github.com/vishvananda/netlink/xfrm_state.go index 483d8934a8f..19df82c7632 100644 --- a/agent/vendor/github.com/vishvananda/netlink/xfrm_state.go +++ b/agent/vendor/github.com/vishvananda/netlink/xfrm_state.go @@ -94,7 +94,7 @@ type XfrmState struct { Limits XfrmStateLimits Statistics XfrmStateStats Mark *XfrmMark - OutputMark int + OutputMark *XfrmMark Ifid int Auth *XfrmStateAlgo Crypt *XfrmStateAlgo @@ -104,7 
+104,7 @@ type XfrmState struct { } func (sa XfrmState) String() string { - return fmt.Sprintf("Dst: %v, Src: %v, Proto: %s, Mode: %s, SPI: 0x%x, ReqID: 0x%x, ReplayWindow: %d, Mark: %v, OutputMark: %d, Ifid: %d, Auth: %v, Crypt: %v, Aead: %v, Encap: %v, ESN: %t", + return fmt.Sprintf("Dst: %v, Src: %v, Proto: %s, Mode: %s, SPI: 0x%x, ReqID: 0x%x, ReplayWindow: %d, Mark: %v, OutputMark: %v, Ifid: %d, Auth: %v, Crypt: %v, Aead: %v, Encap: %v, ESN: %t", sa.Dst, sa.Src, sa.Proto, sa.Mode, sa.Spi, sa.Reqid, sa.ReplayWindow, sa.Mark, sa.OutputMark, sa.Ifid, sa.Auth, sa.Crypt, sa.Aead, sa.Encap, sa.ESN) } func (sa XfrmState) Print(stats bool) string { diff --git a/agent/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go b/agent/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go index 66c99423c47..61a2d2dea28 100644 --- a/agent/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go +++ b/agent/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go @@ -111,7 +111,7 @@ func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error { // A state with spi 0 can't be deleted so don't allow it to be set if state.Spi == 0 { - return fmt.Errorf("Spi must be set when adding xfrm state.") + return fmt.Errorf("Spi must be set when adding xfrm state") } req := h.newNetlinkRequest(nlProto, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) @@ -158,13 +158,19 @@ func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error { out := nl.NewRtAttr(nl.XFRMA_REPLAY_ESN_VAL, writeReplayEsn(state.ReplayWindow)) req.AddData(out) } - if state.OutputMark != 0 { - out := nl.NewRtAttr(nl.XFRMA_OUTPUT_MARK, nl.Uint32Attr(uint32(state.OutputMark))) + if state.OutputMark != nil { + out := nl.NewRtAttr(nl.XFRMA_SET_MARK, nl.Uint32Attr(state.OutputMark.Value)) req.AddData(out) + if state.OutputMark.Mask != 0 { + out = nl.NewRtAttr(nl.XFRMA_SET_MARK_MASK, nl.Uint32Attr(state.OutputMark.Mask)) + req.AddData(out) + } } - ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid))) - req.AddData(ifId) + if state.Ifid != 0 { + ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid))) + req.AddData(ifId) + } _, err := req.Execute(unix.NETLINK_XFRM, 0) return err @@ -277,8 +283,10 @@ func (h *Handle) xfrmStateGetOrDelete(state *XfrmState, nlProto int) (*XfrmState req.AddData(out) } - ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid))) - req.AddData(ifId) + if state.Ifid != 0 { + ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid))) + req.AddData(ifId) + } resType := nl.XFRM_MSG_NEWSA if nlProto == nl.XFRM_MSG_DELSA { @@ -377,8 +385,19 @@ func parseXfrmState(m []byte, family int) (*XfrmState, error) { state.Mark = new(XfrmMark) state.Mark.Value = mark.Value state.Mark.Mask = mark.Mask - case nl.XFRMA_OUTPUT_MARK: - state.OutputMark = int(native.Uint32(attr.Value)) + case nl.XFRMA_SET_MARK: + if state.OutputMark == nil { + state.OutputMark = new(XfrmMark) + } + state.OutputMark.Value = native.Uint32(attr.Value) + case nl.XFRMA_SET_MARK_MASK: + if state.OutputMark == nil { + state.OutputMark = new(XfrmMark) + } + state.OutputMark.Mask = native.Uint32(attr.Value) + if state.OutputMark.Mask == 0xffffffff { + state.OutputMark.Mask = 0 + } case nl.XFRMA_IF_ID: state.Ifid = int(native.Uint32(attr.Value)) } diff --git a/agent/vendor/github.com/vishvananda/netns/.golangci.yml b/agent/vendor/github.com/vishvananda/netns/.golangci.yml new file mode 100644 index 00000000000..600bef78e2b --- /dev/null +++ 
b/agent/vendor/github.com/vishvananda/netns/.golangci.yml @@ -0,0 +1,2 @@ +run: + timeout: 5m diff --git a/agent/vendor/github.com/vishvananda/netns/README.md b/agent/vendor/github.com/vishvananda/netns/README.md index 6b45cfb892e..bdfedbe81fa 100644 --- a/agent/vendor/github.com/vishvananda/netns/README.md +++ b/agent/vendor/github.com/vishvananda/netns/README.md @@ -23,6 +23,7 @@ import ( "fmt" "net" "runtime" + "github.com/vishvananda/netns" ) diff --git a/agent/vendor/github.com/vishvananda/netns/doc.go b/agent/vendor/github.com/vishvananda/netns/doc.go new file mode 100644 index 00000000000..cd4093a4d7e --- /dev/null +++ b/agent/vendor/github.com/vishvananda/netns/doc.go @@ -0,0 +1,9 @@ +// Package netns allows ultra-simple network namespace handling. NsHandles +// can be retrieved and set. Note that the current namespace is thread +// local so actions that set and reset namespaces should use LockOSThread +// to make sure the namespace doesn't change due to a goroutine switch. +// It is best to close NsHandles when you are done with them. This can be +// accomplished via a `defer ns.Close()` on the handle. Changing namespaces +// requires elevated privileges, so in most cases this code needs to be run +// as root. +package netns diff --git a/agent/vendor/github.com/vishvananda/netns/netns_linux.go b/agent/vendor/github.com/vishvananda/netns/netns_linux.go index c76acd08702..2ed7c7e2fa4 100644 --- a/agent/vendor/github.com/vishvananda/netns/netns_linux.go +++ b/agent/vendor/github.com/vishvananda/netns/netns_linux.go @@ -1,33 +1,31 @@ -// +build linux - package netns import ( "fmt" - "io/ioutil" "os" "path" "path/filepath" "strconv" "strings" - "syscall" "golang.org/x/sys/unix" ) -// Deprecated: use syscall pkg instead (go >= 1.5 needed). +// Deprecated: use golang.org/x/sys/unix pkg instead. const ( - CLONE_NEWUTS = 0x04000000 /* New utsname group? */ - CLONE_NEWIPC = 0x08000000 /* New ipcs */ - CLONE_NEWUSER = 0x10000000 /* New user namespace */ - CLONE_NEWPID = 0x20000000 /* New pid namespace */ - CLONE_NEWNET = 0x40000000 /* New network namespace */ - CLONE_IO = 0x80000000 /* Get io context */ - bindMountPath = "/run/netns" /* Bind mount path for named netns */ + CLONE_NEWUTS = unix.CLONE_NEWUTS /* New utsname group? */ + CLONE_NEWIPC = unix.CLONE_NEWIPC /* New ipcs */ + CLONE_NEWUSER = unix.CLONE_NEWUSER /* New user namespace */ + CLONE_NEWPID = unix.CLONE_NEWPID /* New pid namespace */ + CLONE_NEWNET = unix.CLONE_NEWNET /* New network namespace */ + CLONE_IO = unix.CLONE_IO /* Get io context */ ) -// Setns sets namespace using syscall. Note that this should be a method -// in syscall but it has not been added. +const bindMountPath = "/run/netns" /* Bind mount path for named netns */ + +// Setns sets namespace using golang.org/x/sys/unix.Setns. +// +// Deprecated: Use golang.org/x/sys/unix.Setns instead. func Setns(ns NsHandle, nstype int) (err error) { return unix.Setns(int(ns), nstype) } @@ -35,19 +33,20 @@ func Setns(ns NsHandle, nstype int) (err error) { // Set sets the current network namespace to the namespace represented // by NsHandle. func Set(ns NsHandle) (err error) { - return Setns(ns, CLONE_NEWNET) + return unix.Setns(int(ns), unix.CLONE_NEWNET) } // New creates a new network namespace, sets it as current and returns // a handle to it. 
func New() (ns NsHandle, err error) { - if err := unix.Unshare(CLONE_NEWNET); err != nil { + if err := unix.Unshare(unix.CLONE_NEWNET); err != nil { return -1, err } return Get() } -// NewNamed creates a new named network namespace and returns a handle to it +// NewNamed creates a new named network namespace, sets it as current, +// and returns a handle to it func NewNamed(name string) (NsHandle, error) { if _, err := os.Stat(bindMountPath); os.IsNotExist(err) { err = os.MkdirAll(bindMountPath, 0755) @@ -65,13 +64,15 @@ func NewNamed(name string) (NsHandle, error) { f, err := os.OpenFile(namedPath, os.O_CREATE|os.O_EXCL, 0444) if err != nil { + newNs.Close() return None(), err } f.Close() - nsPath := fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), syscall.Gettid()) - err = syscall.Mount(nsPath, namedPath, "bind", syscall.MS_BIND, "") + nsPath := fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid()) + err = unix.Mount(nsPath, namedPath, "bind", unix.MS_BIND, "") if err != nil { + newNs.Close() return None(), err } @@ -82,7 +83,7 @@ func NewNamed(name string) (NsHandle, error) { func DeleteNamed(name string) error { namedPath := path.Join(bindMountPath, name) - err := syscall.Unmount(namedPath, syscall.MNT_DETACH) + err := unix.Unmount(namedPath, unix.MNT_DETACH) if err != nil { return err } @@ -108,7 +109,7 @@ func GetFromPath(path string) (NsHandle, error) { // GetFromName gets a handle to a named network namespace such as one // created by `ip netns add`. func GetFromName(name string) (NsHandle, error) { - return GetFromPath(fmt.Sprintf("/var/run/netns/%s", name)) + return GetFromPath(filepath.Join(bindMountPath, name)) } // GetFromPid gets a handle to the network namespace of a given pid. @@ -133,33 +134,38 @@ func GetFromDocker(id string) (NsHandle, error) { } // borrowed from docker/utils/utils.go -func findCgroupMountpoint(cgroupType string) (string, error) { - output, err := ioutil.ReadFile("/proc/mounts") +func findCgroupMountpoint(cgroupType string) (int, string, error) { + output, err := os.ReadFile("/proc/mounts") if err != nil { - return "", err + return -1, "", err } // /proc/mounts has 6 fields per line, one mount per line, e.g. // cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0 for _, line := range strings.Split(string(output), "\n") { parts := strings.Split(line, " ") - if len(parts) == 6 && parts[2] == "cgroup" { - for _, opt := range strings.Split(parts[3], ",") { - if opt == cgroupType { - return parts[1], nil + if len(parts) == 6 { + switch parts[2] { + case "cgroup2": + return 2, parts[1], nil + case "cgroup": + for _, opt := range strings.Split(parts[3], ",") { + if opt == cgroupType { + return 1, parts[1], nil + } } } } } - return "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType) + return -1, "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType) } // Returns the relative path to the cgroup docker is running in. 
// borrowed from docker/utils/utils.go // modified to get the docker pid instead of using /proc/self -func getThisCgroup(cgroupType string) (string, error) { - dockerpid, err := ioutil.ReadFile("/var/run/docker.pid") +func getDockerCgroup(cgroupVer int, cgroupType string) (string, error) { + dockerpid, err := os.ReadFile("/var/run/docker.pid") if err != nil { return "", err } @@ -171,14 +177,15 @@ func getThisCgroup(cgroupType string) (string, error) { if err != nil { return "", err } - output, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/cgroup", pid)) + output, err := os.ReadFile(fmt.Sprintf("/proc/%d/cgroup", pid)) if err != nil { return "", err } for _, line := range strings.Split(string(output), "\n") { parts := strings.Split(line, ":") // any type used by docker should work - if parts[1] == cgroupType { + if (cgroupVer == 1 && parts[1] == cgroupType) || + (cgroupVer == 2 && parts[1] == "") { return parts[2], nil } } @@ -190,40 +197,56 @@ func getThisCgroup(cgroupType string) (string, error) { // modified to only return the first pid // modified to glob with id // modified to search for newer docker containers +// modified to look for cgroups v2 func getPidForContainer(id string) (int, error) { pid := 0 // memory is chosen randomly, any cgroup used by docker works cgroupType := "memory" - cgroupRoot, err := findCgroupMountpoint(cgroupType) + cgroupVer, cgroupRoot, err := findCgroupMountpoint(cgroupType) if err != nil { return pid, err } - cgroupThis, err := getThisCgroup(cgroupType) + cgroupDocker, err := getDockerCgroup(cgroupVer, cgroupType) if err != nil { return pid, err } id += "*" + var pidFile string + if cgroupVer == 1 { + pidFile = "tasks" + } else if cgroupVer == 2 { + pidFile = "cgroup.procs" + } else { + return -1, fmt.Errorf("Invalid cgroup version '%d'", cgroupVer) + } + attempts := []string{ - filepath.Join(cgroupRoot, cgroupThis, id, "tasks"), + filepath.Join(cgroupRoot, cgroupDocker, id, pidFile), // With more recent lxc versions use, cgroup will be in lxc/ - filepath.Join(cgroupRoot, cgroupThis, "lxc", id, "tasks"), + filepath.Join(cgroupRoot, cgroupDocker, "lxc", id, pidFile), // With more recent docker, cgroup will be in docker/ - filepath.Join(cgroupRoot, cgroupThis, "docker", id, "tasks"), + filepath.Join(cgroupRoot, cgroupDocker, "docker", id, pidFile), // Even more recent docker versions under systemd use docker-.scope/ - filepath.Join(cgroupRoot, "system.slice", "docker-"+id+".scope", "tasks"), + filepath.Join(cgroupRoot, "system.slice", "docker-"+id+".scope", pidFile), // Even more recent docker versions under cgroup/systemd/docker// - filepath.Join(cgroupRoot, "..", "systemd", "docker", id, "tasks"), - // Kubernetes with docker and CNI is even more different - filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "*", "pod*", id, "tasks"), - // Another flavor of containers location in recent kubernetes 1.11+ - filepath.Join(cgroupRoot, cgroupThis, "kubepods.slice", "kubepods-besteffort.slice", "*", "docker-"+id+".scope", "tasks"), - // When runs inside of a container with recent kubernetes 1.11+ - filepath.Join(cgroupRoot, "kubepods.slice", "kubepods-besteffort.slice", "*", "docker-"+id+".scope", "tasks"), + filepath.Join(cgroupRoot, "..", "systemd", "docker", id, pidFile), + // Kubernetes with docker and CNI is even more different. 
Works for BestEffort and Burstable QoS + filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "*", "pod*", id, pidFile), + // Same as above but for Guaranteed QoS + filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "pod*", id, pidFile), + // Another flavor of containers location in recent kubernetes 1.11+. Works for BestEffort and Burstable QoS + filepath.Join(cgroupRoot, cgroupDocker, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", pidFile), + // Same as above but for Guaranteed QoS + filepath.Join(cgroupRoot, cgroupDocker, "kubepods.slice", "*", "docker-"+id+".scope", pidFile), + // When runs inside of a container with recent kubernetes 1.11+. Works for BestEffort and Burstable QoS + filepath.Join(cgroupRoot, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", pidFile), + // Same as above but for Guaranteed QoS + filepath.Join(cgroupRoot, "kubepods.slice", "*", "docker-"+id+".scope", pidFile), } var filename string @@ -241,7 +264,7 @@ func getPidForContainer(id string) (int, error) { return pid, fmt.Errorf("Unable to find container: %v", id[:len(id)-1]) } - output, err := ioutil.ReadFile(filename) + output, err := os.ReadFile(filename) if err != nil { return pid, err } diff --git a/agent/vendor/github.com/vishvananda/netns/netns_unspecified.go b/agent/vendor/github.com/vishvananda/netns/netns_others.go similarity index 63% rename from agent/vendor/github.com/vishvananda/netns/netns_unspecified.go rename to agent/vendor/github.com/vishvananda/netns/netns_others.go index d06af62b68a..0489837741b 100644 --- a/agent/vendor/github.com/vishvananda/netns/netns_unspecified.go +++ b/agent/vendor/github.com/vishvananda/netns/netns_others.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package netns @@ -10,6 +11,14 @@ var ( ErrNotImplemented = errors.New("not implemented") ) +// Setns sets namespace using golang.org/x/sys/unix.Setns on Linux. It +// is not implemented on other platforms. +// +// Deprecated: Use golang.org/x/sys/unix.Setns instead. +func Setns(ns NsHandle, nstype int) (err error) { + return ErrNotImplemented +} + func Set(ns NsHandle) (err error) { return ErrNotImplemented } @@ -18,6 +27,14 @@ func New() (ns NsHandle, err error) { return -1, ErrNotImplemented } +func NewNamed(name string) (NsHandle, error) { + return -1, ErrNotImplemented +} + +func DeleteNamed(name string) error { + return ErrNotImplemented +} + func Get() (NsHandle, error) { return -1, ErrNotImplemented } diff --git a/agent/vendor/github.com/vishvananda/netns/netns.go b/agent/vendor/github.com/vishvananda/netns/nshandle_linux.go similarity index 75% rename from agent/vendor/github.com/vishvananda/netns/netns.go rename to agent/vendor/github.com/vishvananda/netns/nshandle_linux.go index 116befd548c..1baffb66acb 100644 --- a/agent/vendor/github.com/vishvananda/netns/netns.go +++ b/agent/vendor/github.com/vishvananda/netns/nshandle_linux.go @@ -1,11 +1,3 @@ -// Package netns allows ultra-simple network namespace handling. NsHandles -// can be retrieved and set. Note that the current namespace is thread -// local so actions that set and reset namespaces should use LockOSThread -// to make sure the namespace doesn't change due to a goroutine switch. -// It is best to close NsHandles when you are done with them. This can be -// accomplished via a `defer ns.Close()` on the handle. Changing namespaces -// requires elevated privileges, so in most cases this code needs to be run -// as root. 
package netns import ( @@ -38,7 +30,7 @@ func (ns NsHandle) Equal(other NsHandle) bool { // String shows the file descriptor number and its dev and inode. func (ns NsHandle) String() string { if ns == -1 { - return "NS(None)" + return "NS(none)" } var s unix.Stat_t if err := unix.Fstat(int(ns), &s); err != nil { @@ -71,7 +63,7 @@ func (ns *NsHandle) Close() error { if err := unix.Close(int(*ns)); err != nil { return err } - (*ns) = -1 + *ns = -1 return nil } diff --git a/agent/vendor/github.com/vishvananda/netns/nshandle_others.go b/agent/vendor/github.com/vishvananda/netns/nshandle_others.go new file mode 100644 index 00000000000..af727bc091c --- /dev/null +++ b/agent/vendor/github.com/vishvananda/netns/nshandle_others.go @@ -0,0 +1,45 @@ +//go:build !linux +// +build !linux + +package netns + +// NsHandle is a handle to a network namespace. It can only be used on Linux, +// but provides stub methods on other platforms. +type NsHandle int + +// Equal determines if two network handles refer to the same network +// namespace. It is only implemented on Linux. +func (ns NsHandle) Equal(_ NsHandle) bool { + return false +} + +// String shows the file descriptor number and its dev and inode. +// It is only implemented on Linux, and returns "NS(none)" on other +// platforms. +func (ns NsHandle) String() string { + return "NS(none)" +} + +// UniqueId returns a string which uniquely identifies the namespace +// associated with the network handle. It is only implemented on Linux, +// and returns "NS(none)" on other platforms. +func (ns NsHandle) UniqueId() string { + return "NS(none)" +} + +// IsOpen returns true if Close() has not been called. It is only implemented +// on Linux and always returns false on other platforms. +func (ns NsHandle) IsOpen() bool { + return false +} + +// Close closes the NsHandle and resets its file descriptor to -1. +// It is only implemented on Linux. +func (ns *NsHandle) Close() error { + return nil +} + +// None gets an empty (closed) NsHandle. +func None() NsHandle { + return NsHandle(-1) +} diff --git a/agent/vendor/go.etcd.io/bbolt/.gitignore b/agent/vendor/go.etcd.io/bbolt/.gitignore index 3bcd8cbaf02..18312f0043a 100644 --- a/agent/vendor/go.etcd.io/bbolt/.gitignore +++ b/agent/vendor/go.etcd.io/bbolt/.gitignore @@ -3,3 +3,5 @@ *.swp /bin/ cover.out +/.idea +*.iml diff --git a/agent/vendor/go.etcd.io/bbolt/.travis.yml b/agent/vendor/go.etcd.io/bbolt/.travis.yml index 257dfdfee48..452601e49d6 100644 --- a/agent/vendor/go.etcd.io/bbolt/.travis.yml +++ b/agent/vendor/go.etcd.io/bbolt/.travis.yml @@ -4,9 +4,10 @@ go_import_path: go.etcd.io/bbolt sudo: false go: -- 1.12 +- 1.15 before_install: +- go get -v golang.org/x/sys/unix - go get -v honnef.co/go/tools/... 
- go get -v github.com/kisielk/errcheck diff --git a/agent/vendor/go.etcd.io/bbolt/Makefile b/agent/vendor/go.etcd.io/bbolt/Makefile index 2968aaa61dd..21ecf48f610 100644 --- a/agent/vendor/go.etcd.io/bbolt/Makefile +++ b/agent/vendor/go.etcd.io/bbolt/Makefile @@ -2,8 +2,6 @@ BRANCH=`git rev-parse --abbrev-ref HEAD` COMMIT=`git rev-parse --short HEAD` GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" -default: build - race: @TEST_FREELIST_TYPE=hashmap go test -v -race -test.run="TestSimulate_(100op|1000op)" @echo "array freelist test" diff --git a/agent/vendor/go.etcd.io/bbolt/README.md b/agent/vendor/go.etcd.io/bbolt/README.md index c9e64b1a615..f1b4a7b2bf8 100644 --- a/agent/vendor/go.etcd.io/bbolt/README.md +++ b/agent/vendor/go.etcd.io/bbolt/README.md @@ -908,12 +908,14 @@ Below is a list of public, open source projects that use Bolt: * [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. * [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners. * [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files. +* [BoltDB Viewer](https://github.com/zc310/rich_boltdb) - A BoltDB Viewer Can run on Windows、Linux、Android system. * [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. * [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. * [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining simple tx and key scans. * [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. * [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. +* [🌰 Chestnut](https://github.com/jrapoport/chestnut) - Chestnut is encrypted storage for Go. * [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. * [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. * [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. @@ -938,9 +940,8 @@ Below is a list of public, open source projects that use Bolt: * [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. * [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. * [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage. -* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard. -* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. * [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. +* [Rain](https://github.com/cenkalti/rain) - BitTorrent client and library. * [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi. 
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service * [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. diff --git a/agent/vendor/go.etcd.io/bbolt/bolt_unix.go b/agent/vendor/go.etcd.io/bbolt/bolt_unix.go index 2938fed5845..4e5f65ccc84 100644 --- a/agent/vendor/go.etcd.io/bbolt/bolt_unix.go +++ b/agent/vendor/go.etcd.io/bbolt/bolt_unix.go @@ -7,6 +7,8 @@ import ( "syscall" "time" "unsafe" + + "golang.org/x/sys/unix" ) // flock acquires an advisory lock on a file descriptor. @@ -49,13 +51,13 @@ func funlock(db *DB) error { // mmap memory maps a DB's data file. func mmap(db *DB, sz int) error { // Map the data file to memory. - b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) if err != nil { return err } // Advise the kernel that the mmap is accessed randomly. - err = madvise(b, syscall.MADV_RANDOM) + err = unix.Madvise(b, syscall.MADV_RANDOM) if err != nil && err != syscall.ENOSYS { // Ignore not implemented error in kernel because it still works. return fmt.Errorf("madvise: %s", err) @@ -76,18 +78,9 @@ func munmap(db *DB) error { } // Unmap using the original byte slice. - err := syscall.Munmap(db.dataref) + err := unix.Munmap(db.dataref) db.dataref = nil db.data = nil db.datasz = 0 return err } - -// NOTE: This function is copied from stdlib because it is not available on darwin. -func madvise(b []byte, advice int) (err error) { - _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = e1 - } - return -} diff --git a/agent/vendor/go.etcd.io/bbolt/compact.go b/agent/vendor/go.etcd.io/bbolt/compact.go new file mode 100644 index 00000000000..e4fe91b046d --- /dev/null +++ b/agent/vendor/go.etcd.io/bbolt/compact.go @@ -0,0 +1,114 @@ +package bbolt + +// Compact will create a copy of the source DB and in the destination DB. This may +// reclaim space that the source database no longer has use for. txMaxSize can be +// used to limit the transactions size of this process and may trigger intermittent +// commits. A value of zero will ignore transaction sizes. +// TODO: merge with: https://github.com/etcd-io/etcd/blob/b7f0f52a16dbf83f18ca1d803f7892d750366a94/mvcc/backend/backend.go#L349 +func Compact(dst, src *DB, txMaxSize int64) error { + // commit regularly, or we'll run out of memory for large datasets if using one transaction. + var size int64 + tx, err := dst.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + if err := walk(src, func(keys [][]byte, k, v []byte, seq uint64) error { + // On each key/value, check if we have exceeded tx size. + sz := int64(len(k) + len(v)) + if size+sz > txMaxSize && txMaxSize != 0 { + // Commit previous transaction. + if err := tx.Commit(); err != nil { + return err + } + + // Start new transaction. + tx, err = dst.Begin(true) + if err != nil { + return err + } + size = 0 + } + size += sz + + // Create bucket on the root transaction if this is the first level. 
+ nk := len(keys) + if nk == 0 { + bkt, err := tx.CreateBucket(k) + if err != nil { + return err + } + if err := bkt.SetSequence(seq); err != nil { + return err + } + return nil + } + + // Create buckets on subsequent levels, if necessary. + b := tx.Bucket(keys[0]) + if nk > 1 { + for _, k := range keys[1:] { + b = b.Bucket(k) + } + } + + // Fill the entire page for best compaction. + b.FillPercent = 1.0 + + // If there is no value then this is a bucket call. + if v == nil { + bkt, err := b.CreateBucket(k) + if err != nil { + return err + } + if err := bkt.SetSequence(seq); err != nil { + return err + } + return nil + } + + // Otherwise treat it as a key/value pair. + return b.Put(k, v) + }); err != nil { + return err + } + + return tx.Commit() +} + +// walkFunc is the type of the function called for keys (buckets and "normal" +// values) discovered by Walk. keys is the list of keys to descend to the bucket +// owning the discovered key/value pair k/v. +type walkFunc func(keys [][]byte, k, v []byte, seq uint64) error + +// walk walks recursively the bolt database db, calling walkFn for each key it finds. +func walk(db *DB, walkFn walkFunc) error { + return db.View(func(tx *Tx) error { + return tx.ForEach(func(name []byte, b *Bucket) error { + return walkBucket(b, nil, name, nil, b.Sequence(), walkFn) + }) + }) +} + +func walkBucket(b *Bucket, keypath [][]byte, k, v []byte, seq uint64, fn walkFunc) error { + // Execute callback. + if err := fn(keypath, k, v, seq); err != nil { + return err + } + + // If this is not a bucket then stop. + if v != nil { + return nil + } + + // Iterate over each child key/value. + keypath = append(keypath, k) + return b.ForEach(func(k, v []byte) error { + if v == nil { + bkt := b.Bucket(k) + return walkBucket(bkt, keypath, k, nil, bkt.Sequence(), fn) + } + return walkBucket(b, keypath, k, v, b.Sequence(), fn) + }) +} diff --git a/agent/vendor/go.etcd.io/bbolt/db.go b/agent/vendor/go.etcd.io/bbolt/db.go index 80b0095cc34..a798c390a20 100644 --- a/agent/vendor/go.etcd.io/bbolt/db.go +++ b/agent/vendor/go.etcd.io/bbolt/db.go @@ -120,6 +120,12 @@ type DB struct { // of truncate() and fsync() when growing the data file. AllocSize int + // Mlock locks database file in memory when set to true. + // It prevents major page faults, however used memory can't be reclaimed. + // + // Supported only on Unix via mlock/munlock syscalls. + Mlock bool + path string openFile func(string, int, os.FileMode) (*os.File, error) file *os.File @@ -188,6 +194,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { db.MmapFlags = options.MmapFlags db.NoFreelistSync = options.NoFreelistSync db.FreelistType = options.FreelistType + db.Mlock = options.Mlock // Set default values for later DB operations. db.MaxBatchSize = DefaultMaxBatchSize @@ -337,7 +344,8 @@ func (db *DB) mmap(minsz int) error { } // Ensure the size is at least the minimum size. - var size = int(info.Size()) + fileSize := int(info.Size()) + var size = fileSize if size < minsz { size = minsz } @@ -346,6 +354,13 @@ func (db *DB) mmap(minsz int) error { return err } + if db.Mlock { + // Unlock db memory + if err := db.munlock(fileSize); err != nil { + return err + } + } + // Dereference all mmap references before unmapping. if db.rwtx != nil { db.rwtx.root.dereference() @@ -361,6 +376,13 @@ func (db *DB) mmap(minsz int) error { return err } + if db.Mlock { + // Don't allow swapping of data file + if err := db.mlock(fileSize); err != nil { + return err + } + } + // Save references to the meta pages. 
db.meta0 = db.page(0).meta() db.meta1 = db.page(1).meta() @@ -422,12 +444,36 @@ func (db *DB) mmapSize(size int) (int, error) { return int(sz), nil } +func (db *DB) munlock(fileSize int) error { + if err := munlock(db, fileSize); err != nil { + return fmt.Errorf("munlock error: " + err.Error()) + } + return nil +} + +func (db *DB) mlock(fileSize int) error { + if err := mlock(db, fileSize); err != nil { + return fmt.Errorf("mlock error: " + err.Error()) + } + return nil +} + +func (db *DB) mrelock(fileSizeFrom, fileSizeTo int) error { + if err := db.munlock(fileSizeFrom); err != nil { + return err + } + if err := db.mlock(fileSizeTo); err != nil { + return err + } + return nil +} + // init creates a new database file and initializes its meta pages. func (db *DB) init() error { // Create two meta pages on a buffer. buf := make([]byte, db.pageSize*4) for i := 0; i < 2; i++ { - p := db.pageInBuffer(buf[:], pgid(i)) + p := db.pageInBuffer(buf, pgid(i)) p.id = pgid(i) p.flags = metaPageFlag @@ -444,13 +490,13 @@ func (db *DB) init() error { } // Write an empty freelist at page 3. - p := db.pageInBuffer(buf[:], pgid(2)) + p := db.pageInBuffer(buf, pgid(2)) p.id = pgid(2) p.flags = freelistPageFlag p.count = 0 // Write an empty leaf page at page 4. - p = db.pageInBuffer(buf[:], pgid(3)) + p = db.pageInBuffer(buf, pgid(3)) p.id = pgid(3) p.flags = leafPageFlag p.count = 0 @@ -462,6 +508,7 @@ func (db *DB) init() error { if err := fdatasync(db); err != nil { return err } + db.filesz = len(buf) return nil } @@ -973,6 +1020,12 @@ func (db *DB) grow(sz int) error { if err := db.file.Sync(); err != nil { return fmt.Errorf("file sync error: %s", err) } + if db.Mlock { + // unlock old file and lock new one + if err := db.mrelock(db.filesz, sz); err != nil { + return fmt.Errorf("mlock/munlock error: %s", err) + } + } } db.filesz = sz @@ -1064,6 +1117,11 @@ type Options struct { // OpenFile is used to open files. It defaults to os.OpenFile. This option // is useful for writing hermetic tests. OpenFile func(string, int, os.FileMode) (*os.File, error) + + // Mlock locks database file in memory when set to true. + // It prevents potential page faults, however + // used memory can't be reclaimed. (UNIX only) + Mlock bool } // DefaultOptions represent the options used if nil options are passed into Open(). 
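The two bbolt additions above (the new Options.Mlock flag in db.go and the Compact helper in compact.go) are consumed by callers roughly as in the sketch below. This is an illustration only, not part of the vendored sources; the file names are placeholders, and Mlock is effective only on Unix (the Windows stub panics).

package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Open the source database; Mlock pins the mapped file in memory so
	// reads never major-fault (Unix only, see mlock_unix.go above).
	src, err := bolt.Open("data.db", 0600, &bolt.Options{Mlock: true})
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	// Destination for the compacted copy.
	dst, err := bolt.Open("data-compacted.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	// Copy every bucket and key into dst, committing roughly every 64 KiB
	// of copied data; a txMaxSize of 0 would disable the intermediate commits.
	if err := bolt.Compact(dst, src, 1<<16); err != nil {
		log.Fatal(err)
	}
}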
diff --git a/agent/vendor/go.etcd.io/bbolt/freelist_hmap.go b/agent/vendor/go.etcd.io/bbolt/freelist_hmap.go index 02ef2be0441..dbd67a1e736 100644 --- a/agent/vendor/go.etcd.io/bbolt/freelist_hmap.go +++ b/agent/vendor/go.etcd.io/bbolt/freelist_hmap.go @@ -4,7 +4,7 @@ import "sort" // hashmapFreeCount returns count of free pages(hashmap version) func (f *freelist) hashmapFreeCount() int { - // use the forwardmap to get the total count + // use the forwardMap to get the total count count := 0 for _, size := range f.forwardMap { count += int(size) @@ -41,7 +41,7 @@ func (f *freelist) hashmapAllocate(txid txid, n int) pgid { for pid := range bm { // remove the initial - f.delSpan(pid, uint64(size)) + f.delSpan(pid, size) f.allocs[pid] = txid @@ -51,7 +51,7 @@ func (f *freelist) hashmapAllocate(txid txid, n int) pgid { f.addSpan(pid+pgid(n), remain) for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, pid+pgid(i)) + delete(f.cache, pid+i) } return pid } diff --git a/agent/vendor/go.etcd.io/bbolt/mlock_unix.go b/agent/vendor/go.etcd.io/bbolt/mlock_unix.go new file mode 100644 index 00000000000..6a6c7b3537e --- /dev/null +++ b/agent/vendor/go.etcd.io/bbolt/mlock_unix.go @@ -0,0 +1,36 @@ +// +build !windows + +package bbolt + +import "golang.org/x/sys/unix" + +// mlock locks memory of db file +func mlock(db *DB, fileSize int) error { + sizeToLock := fileSize + if sizeToLock > db.datasz { + // Can't lock more than mmaped slice + sizeToLock = db.datasz + } + if err := unix.Mlock(db.dataref[:sizeToLock]); err != nil { + return err + } + return nil +} + +//munlock unlocks memory of db file +func munlock(db *DB, fileSize int) error { + if db.dataref == nil { + return nil + } + + sizeToUnlock := fileSize + if sizeToUnlock > db.datasz { + // Can't unlock more than mmaped slice + sizeToUnlock = db.datasz + } + + if err := unix.Munlock(db.dataref[:sizeToUnlock]); err != nil { + return err + } + return nil +} diff --git a/agent/vendor/go.etcd.io/bbolt/mlock_windows.go b/agent/vendor/go.etcd.io/bbolt/mlock_windows.go new file mode 100644 index 00000000000..b4a36a493db --- /dev/null +++ b/agent/vendor/go.etcd.io/bbolt/mlock_windows.go @@ -0,0 +1,11 @@ +package bbolt + +// mlock locks memory of db file +func mlock(_ *DB, _ int) error { + panic("mlock is supported only on UNIX systems") +} + +//munlock unlocks memory of db file +func munlock(_ *DB, _ int) error { + panic("munlock is supported only on UNIX systems") +} diff --git a/agent/vendor/go.etcd.io/bbolt/tx.go b/agent/vendor/go.etcd.io/bbolt/tx.go index 4b1a64a8b8a..869d4120081 100644 --- a/agent/vendor/go.etcd.io/bbolt/tx.go +++ b/agent/vendor/go.etcd.io/bbolt/tx.go @@ -188,7 +188,6 @@ func (tx *Tx) Commit() error { } // If strict mode is enabled then perform a consistency check. - // Only the first consistency error is reported in the panic. if tx.db.StrictMode { ch := tx.Check() var errs []string @@ -393,7 +392,7 @@ func (tx *Tx) CopyFile(path string, mode os.FileMode) error { return err } - err = tx.Copy(f) + _, err = tx.WriteTo(f) if err != nil { _ = f.Close() return err diff --git a/agent/vendor/golang.org/x/tools/cmd/stringer/stringer.go b/agent/vendor/golang.org/x/tools/cmd/stringer/stringer.go new file mode 100644 index 00000000000..998d1a51bfd --- /dev/null +++ b/agent/vendor/golang.org/x/tools/cmd/stringer/stringer.go @@ -0,0 +1,657 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Stringer is a tool to automate the creation of methods that satisfy the fmt.Stringer +// interface. Given the name of a (signed or unsigned) integer type T that has constants +// defined, stringer will create a new self-contained Go source file implementing +// +// func (t T) String() string +// +// The file is created in the same package and directory as the package that defines T. +// It has helpful defaults designed for use with go generate. +// +// Stringer works best with constants that are consecutive values such as created using iota, +// but creates good code regardless. In the future it might also provide custom support for +// constant sets that are bit patterns. +// +// For example, given this snippet, +// +// package painkiller +// +// type Pill int +// +// const ( +// Placebo Pill = iota +// Aspirin +// Ibuprofen +// Paracetamol +// Acetaminophen = Paracetamol +// ) +// +// running this command +// +// stringer -type=Pill +// +// in the same directory will create the file pill_string.go, in package painkiller, +// containing a definition of +// +// func (Pill) String() string +// +// That method will translate the value of a Pill constant to the string representation +// of the respective constant name, so that the call fmt.Print(painkiller.Aspirin) will +// print the string "Aspirin". +// +// Typically this process would be run using go generate, like this: +// +// //go:generate stringer -type=Pill +// +// If multiple constants have the same value, the lexically first matching name will +// be used (in the example, Acetaminophen will print as "Paracetamol"). +// +// With no arguments, it processes the package in the current directory. +// Otherwise, the arguments must name a single directory holding a Go package +// or a set of Go source files that represent a single Go package. +// +// The -type flag accepts a comma-separated list of types so a single run can +// generate methods for multiple types. The default output file is t_string.go, +// where t is the lower-cased name of the first type listed. It can be overridden +// with the -output flag. +// +// The -linecomment flag tells stringer to generate the text of any line comment, trimmed +// of leading spaces, instead of the constant name. For instance, if the constants above had a +// Pill prefix, one could write +// +// PillAspirin // Aspirin +// +// to suppress it in the output. +package main // import "golang.org/x/tools/cmd/stringer" + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/constant" + "go/format" + "go/token" + "go/types" + "log" + "os" + "path/filepath" + "sort" + "strings" + + "golang.org/x/tools/go/packages" +) + +var ( + typeNames = flag.String("type", "", "comma-separated list of type names; must be set") + output = flag.String("output", "", "output file name; default srcdir/_string.go") + trimprefix = flag.String("trimprefix", "", "trim the `prefix` from the generated constant names") + linecomment = flag.Bool("linecomment", false, "use line comment text as printed text when present") + buildTags = flag.String("tags", "", "comma-separated list of build tags to apply") +) + +// Usage is a replacement usage function for the flags package. +func Usage() { + fmt.Fprintf(os.Stderr, "Usage of stringer:\n") + fmt.Fprintf(os.Stderr, "\tstringer [flags] -type T [directory]\n") + fmt.Fprintf(os.Stderr, "\tstringer [flags] -type T files... 
# Must be a single package\n") + fmt.Fprintf(os.Stderr, "For more information, see:\n") + fmt.Fprintf(os.Stderr, "\thttps://pkg.go.dev/golang.org/x/tools/cmd/stringer\n") + fmt.Fprintf(os.Stderr, "Flags:\n") + flag.PrintDefaults() +} + +func main() { + log.SetFlags(0) + log.SetPrefix("stringer: ") + flag.Usage = Usage + flag.Parse() + if len(*typeNames) == 0 { + flag.Usage() + os.Exit(2) + } + types := strings.Split(*typeNames, ",") + var tags []string + if len(*buildTags) > 0 { + tags = strings.Split(*buildTags, ",") + } + + // We accept either one directory or a list of files. Which do we have? + args := flag.Args() + if len(args) == 0 { + // Default: process whole package in current directory. + args = []string{"."} + } + + // Parse the package once. + var dir string + g := Generator{ + trimPrefix: *trimprefix, + lineComment: *linecomment, + } + // TODO(suzmue): accept other patterns for packages (directories, list of files, import paths, etc). + if len(args) == 1 && isDirectory(args[0]) { + dir = args[0] + } else { + if len(tags) != 0 { + log.Fatal("-tags option applies only to directories, not when files are specified") + } + dir = filepath.Dir(args[0]) + } + + g.parsePackage(args, tags) + + // Print the header and package clause. + g.Printf("// Code generated by \"stringer %s\"; DO NOT EDIT.\n", strings.Join(os.Args[1:], " ")) + g.Printf("\n") + g.Printf("package %s", g.pkg.name) + g.Printf("\n") + g.Printf("import \"strconv\"\n") // Used by all methods. + + // Run generate for each type. + for _, typeName := range types { + g.generate(typeName) + } + + // Format the output. + src := g.format() + + // Write to file. + outputName := *output + if outputName == "" { + baseName := fmt.Sprintf("%s_string.go", types[0]) + outputName = filepath.Join(dir, strings.ToLower(baseName)) + } + err := os.WriteFile(outputName, src, 0644) + if err != nil { + log.Fatalf("writing output: %s", err) + } +} + +// isDirectory reports whether the named file is a directory. +func isDirectory(name string) bool { + info, err := os.Stat(name) + if err != nil { + log.Fatal(err) + } + return info.IsDir() +} + +// Generator holds the state of the analysis. Primarily used to buffer +// the output for format.Source. +type Generator struct { + buf bytes.Buffer // Accumulated output. + pkg *Package // Package we are scanning. + + trimPrefix string + lineComment bool +} + +func (g *Generator) Printf(format string, args ...interface{}) { + fmt.Fprintf(&g.buf, format, args...) +} + +// File holds a single parsed file and associated data. +type File struct { + pkg *Package // Package to which this file belongs. + file *ast.File // Parsed AST. + // These fields are reset for each type being generated. + typeName string // Name of the constant type. + values []Value // Accumulator for constant values of that type. + + trimPrefix string + lineComment bool +} + +type Package struct { + name string + defs map[*ast.Ident]types.Object + files []*File +} + +// parsePackage analyzes the single package constructed from the patterns and tags. +// parsePackage exits if there is an error. +func (g *Generator) parsePackage(patterns []string, tags []string) { + cfg := &packages.Config{ + Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax, + // TODO: Need to think about constants in test files. Maybe write type_string_test.go + // in a separate pass? For later. 
+ Tests: false, + BuildFlags: []string{fmt.Sprintf("-tags=%s", strings.Join(tags, " "))}, + } + pkgs, err := packages.Load(cfg, patterns...) + if err != nil { + log.Fatal(err) + } + if len(pkgs) != 1 { + log.Fatalf("error: %d packages found", len(pkgs)) + } + g.addPackage(pkgs[0]) +} + +// addPackage adds a type checked Package and its syntax files to the generator. +func (g *Generator) addPackage(pkg *packages.Package) { + g.pkg = &Package{ + name: pkg.Name, + defs: pkg.TypesInfo.Defs, + files: make([]*File, len(pkg.Syntax)), + } + + for i, file := range pkg.Syntax { + g.pkg.files[i] = &File{ + file: file, + pkg: g.pkg, + trimPrefix: g.trimPrefix, + lineComment: g.lineComment, + } + } +} + +// generate produces the String method for the named type. +func (g *Generator) generate(typeName string) { + values := make([]Value, 0, 100) + for _, file := range g.pkg.files { + // Set the state for this run of the walker. + file.typeName = typeName + file.values = nil + if file.file != nil { + ast.Inspect(file.file, file.genDecl) + values = append(values, file.values...) + } + } + + if len(values) == 0 { + log.Fatalf("no values defined for type %s", typeName) + } + // Generate code that will fail if the constants change value. + g.Printf("func _() {\n") + g.Printf("\t// An \"invalid array index\" compiler error signifies that the constant values have changed.\n") + g.Printf("\t// Re-run the stringer command to generate them again.\n") + g.Printf("\tvar x [1]struct{}\n") + for _, v := range values { + g.Printf("\t_ = x[%s - %s]\n", v.originalName, v.str) + } + g.Printf("}\n") + runs := splitIntoRuns(values) + // The decision of which pattern to use depends on the number of + // runs in the numbers. If there's only one, it's easy. For more than + // one, there's a tradeoff between complexity and size of the data + // and code vs. the simplicity of a map. A map takes more space, + // but so does the code. The decision here (crossover at 10) is + // arbitrary, but considers that for large numbers of runs the cost + // of the linear scan in the switch might become important, and + // rather than use yet another algorithm such as binary search, + // we punt and use a map. In any case, the likelihood of a map + // being necessary for any realistic example other than bitmasks + // is very low. And bitmasks probably deserve their own analysis, + // to be done some other day. + switch { + case len(runs) == 1: + g.buildOneRun(runs, typeName) + case len(runs) <= 10: + g.buildMultipleRuns(runs, typeName) + default: + g.buildMap(runs, typeName) + } +} + +// splitIntoRuns breaks the values into runs of contiguous sequences. +// For example, given 1,2,3,5,6,7 it returns {1,2,3},{5,6,7}. +// The input slice is known to be non-empty. +func splitIntoRuns(values []Value) [][]Value { + // We use stable sort so the lexically first name is chosen for equal elements. + sort.Stable(byValue(values)) + // Remove duplicates. Stable sort has put the one we want to print first, + // so use that one. The String method won't care about which named constant + // was the argument, so the first name for the given value is the only one to keep. + // We need to do this because identical values would cause the switch or map + // to fail to compile. + j := 1 + for i := 1; i < len(values); i++ { + if values[i].value != values[i-1].value { + values[j] = values[i] + j++ + } + } + values = values[:j] + runs := make([][]Value, 0, 10) + for len(values) > 0 { + // One contiguous sequence per outer loop. 
+ i := 1 + for i < len(values) && values[i].value == values[i-1].value+1 { + i++ + } + runs = append(runs, values[:i]) + values = values[i:] + } + return runs +} + +// format returns the gofmt-ed contents of the Generator's buffer. +func (g *Generator) format() []byte { + src, err := format.Source(g.buf.Bytes()) + if err != nil { + // Should never happen, but can arise when developing this code. + // The user can compile the output to see the error. + log.Printf("warning: internal error: invalid Go generated: %s", err) + log.Printf("warning: compile the package to analyze the error") + return g.buf.Bytes() + } + return src +} + +// Value represents a declared constant. +type Value struct { + originalName string // The name of the constant. + name string // The name with trimmed prefix. + // The value is stored as a bit pattern alone. The boolean tells us + // whether to interpret it as an int64 or a uint64; the only place + // this matters is when sorting. + // Much of the time the str field is all we need; it is printed + // by Value.String. + value uint64 // Will be converted to int64 when needed. + signed bool // Whether the constant is a signed type. + str string // The string representation given by the "go/constant" package. +} + +func (v *Value) String() string { + return v.str +} + +// byValue lets us sort the constants into increasing order. +// We take care in the Less method to sort in signed or unsigned order, +// as appropriate. +type byValue []Value + +func (b byValue) Len() int { return len(b) } +func (b byValue) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byValue) Less(i, j int) bool { + if b[i].signed { + return int64(b[i].value) < int64(b[j].value) + } + return b[i].value < b[j].value +} + +// genDecl processes one declaration clause. +func (f *File) genDecl(node ast.Node) bool { + decl, ok := node.(*ast.GenDecl) + if !ok || decl.Tok != token.CONST { + // We only care about const declarations. + return true + } + // The name of the type of the constants we are declaring. + // Can change if this is a multi-element declaration. + typ := "" + // Loop over the elements of the declaration. Each element is a ValueSpec: + // a list of names possibly followed by a type, possibly followed by values. + // If the type and value are both missing, we carry down the type (and value, + // but the "go/types" package takes care of that). + for _, spec := range decl.Specs { + vspec := spec.(*ast.ValueSpec) // Guaranteed to succeed as this is CONST. + if vspec.Type == nil && len(vspec.Values) > 0 { + // "X = 1". With no type but a value. If the constant is untyped, + // skip this vspec and reset the remembered type. + typ = "" + + // If this is a simple type conversion, remember the type. + // We don't mind if this is actually a call; a qualified call won't + // be matched (that will be SelectorExpr, not Ident), and only unusual + // situations will result in a function call that appears to be + // a type conversion. + ce, ok := vspec.Values[0].(*ast.CallExpr) + if !ok { + continue + } + id, ok := ce.Fun.(*ast.Ident) + if !ok { + continue + } + typ = id.Name + } + if vspec.Type != nil { + // "X T". We have a type. Remember it. + ident, ok := vspec.Type.(*ast.Ident) + if !ok { + continue + } + typ = ident.Name + } + if typ != f.typeName { + // This is not the type we're looking for. + continue + } + // We now have a list of names (from one line of source code) all being + // declared with the desired type. + // Grab their names and actual values and store them in f.values. 
+ for _, name := range vspec.Names { + if name.Name == "_" { + continue + } + // This dance lets the type checker find the values for us. It's a + // bit tricky: look up the object declared by the name, find its + // types.Const, and extract its value. + obj, ok := f.pkg.defs[name] + if !ok { + log.Fatalf("no value for constant %s", name) + } + info := obj.Type().Underlying().(*types.Basic).Info() + if info&types.IsInteger == 0 { + log.Fatalf("can't handle non-integer constant type %s", typ) + } + value := obj.(*types.Const).Val() // Guaranteed to succeed as this is CONST. + if value.Kind() != constant.Int { + log.Fatalf("can't happen: constant is not an integer %s", name) + } + i64, isInt := constant.Int64Val(value) + u64, isUint := constant.Uint64Val(value) + if !isInt && !isUint { + log.Fatalf("internal error: value of %s is not an integer: %s", name, value.String()) + } + if !isInt { + u64 = uint64(i64) + } + v := Value{ + originalName: name.Name, + value: u64, + signed: info&types.IsUnsigned == 0, + str: value.String(), + } + if c := vspec.Comment; f.lineComment && c != nil && len(c.List) == 1 { + v.name = strings.TrimSpace(c.Text()) + } else { + v.name = strings.TrimPrefix(v.originalName, f.trimPrefix) + } + f.values = append(f.values, v) + } + } + return false +} + +// Helpers + +// usize returns the number of bits of the smallest unsigned integer +// type that will hold n. Used to create the smallest possible slice of +// integers to use as indexes into the concatenated strings. +func usize(n int) int { + switch { + case n < 1<<8: + return 8 + case n < 1<<16: + return 16 + default: + // 2^32 is enough constants for anyone. + return 32 + } +} + +// declareIndexAndNameVars declares the index slices and concatenated names +// strings representing the runs of values. +func (g *Generator) declareIndexAndNameVars(runs [][]Value, typeName string) { + var indexes, names []string + for i, run := range runs { + index, name := g.createIndexAndNameDecl(run, typeName, fmt.Sprintf("_%d", i)) + if len(run) != 1 { + indexes = append(indexes, index) + } + names = append(names, name) + } + g.Printf("const (\n") + for _, name := range names { + g.Printf("\t%s\n", name) + } + g.Printf(")\n\n") + + if len(indexes) > 0 { + g.Printf("var (") + for _, index := range indexes { + g.Printf("\t%s\n", index) + } + g.Printf(")\n\n") + } +} + +// declareIndexAndNameVar is the single-run version of declareIndexAndNameVars +func (g *Generator) declareIndexAndNameVar(run []Value, typeName string) { + index, name := g.createIndexAndNameDecl(run, typeName, "") + g.Printf("const %s\n", name) + g.Printf("var %s\n", index) +} + +// createIndexAndNameDecl returns the pair of declarations for the run. The caller will add "const" and "var". +func (g *Generator) createIndexAndNameDecl(run []Value, typeName string, suffix string) (string, string) { + b := new(bytes.Buffer) + indexes := make([]int, len(run)) + for i := range run { + b.WriteString(run[i].name) + indexes[i] = b.Len() + } + nameConst := fmt.Sprintf("_%s_name%s = %q", typeName, suffix, b.String()) + nameLen := b.Len() + b.Reset() + fmt.Fprintf(b, "_%s_index%s = [...]uint%d{0, ", typeName, suffix, usize(nameLen)) + for i, v := range indexes { + if i > 0 { + fmt.Fprintf(b, ", ") + } + fmt.Fprintf(b, "%d", v) + } + fmt.Fprintf(b, "}") + return b.String(), nameConst +} + +// declareNameVars declares the concatenated names string representing all the values in the runs. 
+func (g *Generator) declareNameVars(runs [][]Value, typeName string, suffix string) { + g.Printf("const _%s_name%s = \"", typeName, suffix) + for _, run := range runs { + for i := range run { + g.Printf("%s", run[i].name) + } + } + g.Printf("\"\n") +} + +// buildOneRun generates the variables and String method for a single run of contiguous values. +func (g *Generator) buildOneRun(runs [][]Value, typeName string) { + values := runs[0] + g.Printf("\n") + g.declareIndexAndNameVar(values, typeName) + // The generated code is simple enough to write as a Printf format. + lessThanZero := "" + if values[0].signed { + lessThanZero = "i < 0 || " + } + if values[0].value == 0 { // Signed or unsigned, 0 is still 0. + g.Printf(stringOneRun, typeName, usize(len(values)), lessThanZero) + } else { + g.Printf(stringOneRunWithOffset, typeName, values[0].String(), usize(len(values)), lessThanZero) + } +} + +// Arguments to format are: +// +// [1]: type name +// [2]: size of index element (8 for uint8 etc.) +// [3]: less than zero check (for signed types) +const stringOneRun = `func (i %[1]s) String() string { + if %[3]si >= %[1]s(len(_%[1]s_index)-1) { + return "%[1]s(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _%[1]s_name[_%[1]s_index[i]:_%[1]s_index[i+1]] +} +` + +// Arguments to format are: +// [1]: type name +// [2]: lowest defined value for type, as a string +// [3]: size of index element (8 for uint8 etc.) +// [4]: less than zero check (for signed types) +/* + */ +const stringOneRunWithOffset = `func (i %[1]s) String() string { + i -= %[2]s + if %[4]si >= %[1]s(len(_%[1]s_index)-1) { + return "%[1]s(" + strconv.FormatInt(int64(i + %[2]s), 10) + ")" + } + return _%[1]s_name[_%[1]s_index[i] : _%[1]s_index[i+1]] +} +` + +// buildMultipleRuns generates the variables and String method for multiple runs of contiguous values. +// For this pattern, a single Printf format won't do. +func (g *Generator) buildMultipleRuns(runs [][]Value, typeName string) { + g.Printf("\n") + g.declareIndexAndNameVars(runs, typeName) + g.Printf("func (i %s) String() string {\n", typeName) + g.Printf("\tswitch {\n") + for i, values := range runs { + if len(values) == 1 { + g.Printf("\tcase i == %s:\n", &values[0]) + g.Printf("\t\treturn _%s_name_%d\n", typeName, i) + continue + } + if values[0].value == 0 && !values[0].signed { + // For an unsigned lower bound of 0, "0 <= i" would be redundant. + g.Printf("\tcase i <= %s:\n", &values[len(values)-1]) + } else { + g.Printf("\tcase %s <= i && i <= %s:\n", &values[0], &values[len(values)-1]) + } + if values[0].value != 0 { + g.Printf("\t\ti -= %s\n", &values[0]) + } + g.Printf("\t\treturn _%s_name_%d[_%s_index_%d[i]:_%s_index_%d[i+1]]\n", + typeName, i, typeName, i, typeName, i) + } + g.Printf("\tdefault:\n") + g.Printf("\t\treturn \"%s(\" + strconv.FormatInt(int64(i), 10) + \")\"\n", typeName) + g.Printf("\t}\n") + g.Printf("}\n") +} + +// buildMap handles the case where the space is so sparse a map is a reasonable fallback. +// It's a rare situation but has simple code. +func (g *Generator) buildMap(runs [][]Value, typeName string) { + g.Printf("\n") + g.declareNameVars(runs, typeName, "") + g.Printf("\nvar _%s_map = map[%s]string{\n", typeName, typeName) + n := 0 + for _, values := range runs { + for _, value := range values { + g.Printf("\t%s: _%s_name[%d:%d],\n", &value, typeName, n, n+len(value.name)) + n += len(value.name) + } + } + g.Printf("}\n\n") + g.Printf(stringMap, typeName) +} + +// Argument to format is the type name. 
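// For the Pill example in the package comment above, the single-run path
// (buildOneRun with the stringOneRun template) emits output roughly like the
// sketch below; the generated header, import, and compile-time guard are
// trimmed, and the exact text is an illustration rather than captured output.
//
//	const _Pill_name = "PlaceboAspirinIbuprofenParacetamol"
//
//	var _Pill_index = [...]uint8{0, 7, 14, 23, 34}
//
//	func (i Pill) String() string {
//		if i < 0 || i >= Pill(len(_Pill_index)-1) {
//			return "Pill(" + strconv.FormatInt(int64(i), 10) + ")"
//		}
//		return _Pill_name[_Pill_index[i]:_Pill_index[i+1]]
//	}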
+const stringMap = `func (i %[1]s) String() string { + if str, ok := _%[1]s_map[i]; ok { + return str + } + return "%[1]s(" + strconv.FormatInt(int64(i), 10) + ")" +} +` diff --git a/agent/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/agent/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go new file mode 100644 index 00000000000..03543bd4bb8 --- /dev/null +++ b/agent/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -0,0 +1,186 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gcexportdata provides functions for locating, reading, and +// writing export data files containing type information produced by the +// gc compiler. This package supports go1.7 export data format and all +// later versions. +// +// Although it might seem convenient for this package to live alongside +// go/types in the standard library, this would cause version skew +// problems for developer tools that use it, since they must be able to +// consume the outputs of the gc compiler both before and after a Go +// update such as from Go 1.7 to Go 1.8. Because this package lives in +// golang.org/x/tools, sites can update their version of this repo some +// time before the Go 1.8 release and rebuild and redeploy their +// developer tools, which will then be able to consume both Go 1.7 and +// Go 1.8 export data files, so they will work before and after the +// Go update. (See discussion at https://golang.org/issue/15651.) +package gcexportdata // import "golang.org/x/tools/go/gcexportdata" + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "go/token" + "go/types" + "io" + "os/exec" + + "golang.org/x/tools/internal/gcimporter" +) + +// Find returns the name of an object (.o) or archive (.a) file +// containing type information for the specified import path, +// using the go command. +// If no file was found, an empty filename is returned. +// +// A relative srcDir is interpreted relative to the current working directory. +// +// Find also returns the package's resolved (canonical) import path, +// reflecting the effects of srcDir and vendoring on importPath. +// +// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, +// which is more efficient. +func Find(importPath, srcDir string) (filename, path string) { + cmd := exec.Command("go", "list", "-json", "-export", "--", importPath) + cmd.Dir = srcDir + out, err := cmd.CombinedOutput() + if err != nil { + return "", "" + } + var data struct { + ImportPath string + Export string + } + json.Unmarshal(out, &data) + return data.Export, data.ImportPath +} + +// NewReader returns a reader for the export data section of an object +// (.o) or archive (.a) file read from r. The new reader may provide +// additional trailing data beyond the end of the export data. +func NewReader(r io.Reader) (io.Reader, error) { + buf := bufio.NewReader(r) + _, size, err := gcimporter.FindExportData(buf) + if err != nil { + return nil, err + } + + if size >= 0 { + // We were given an archive and found the __.PKGDEF in it. + // This tells us the size of the export data, and we don't + // need to return the entire file. + return &io.LimitedReader{ + R: buf, + N: size, + }, nil + } else { + // We were given an object file. As such, we don't know how large + // the export data is and must return the entire file. 
+ return buf, nil + } +} + +// readAll works the same way as io.ReadAll, but avoids allocations and copies +// by preallocating a byte slice of the necessary size if the size is known up +// front. This is always possible when the input is an archive. In that case, +// NewReader will return the known size using an io.LimitedReader. +func readAll(r io.Reader) ([]byte, error) { + if lr, ok := r.(*io.LimitedReader); ok { + data := make([]byte, lr.N) + _, err := io.ReadFull(lr, data) + return data, err + } + return io.ReadAll(r) +} + +// Read reads export data from in, decodes it, and returns type +// information for the package. +// +// The package path (effectively its linker symbol prefix) is +// specified by path, since unlike the package name, this information +// may not be recorded in the export data. +// +// File position information is added to fset. +// +// Read may inspect and add to the imports map to ensure that references +// within the export data to other packages are consistent. The caller +// must ensure that imports[path] does not exist, or exists but is +// incomplete (see types.Package.Complete), and Read inserts the +// resulting package into this map entry. +// +// On return, the state of the reader is undefined. +func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) { + data, err := readAll(in) + if err != nil { + return nil, fmt.Errorf("reading export data for %q: %v", path, err) + } + + if bytes.HasPrefix(data, []byte("!")) { + return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path) + } + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 { + switch data[0] { + case 'v', 'c', 'd': // binary, till go1.10 + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) + + case 'i': // indexed, till go1.19 + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) + return pkg, err + + case 'u': // unified, from go1.20 + _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) + return pkg, err + + default: + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path) + } + } + return nil, fmt.Errorf("empty export data for %s", path) +} + +// Write writes encoded type information for the specified package to out. +// The FileSet provides file position information for named objects. +func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { + if _, err := io.WriteString(out, "i"); err != nil { + return err + } + return gcimporter.IExportData(out, fset, pkg) +} + +// ReadBundle reads an export bundle from in, decodes it, and returns type +// information for the packages. +// File position information is added to fset. +// +// ReadBundle may inspect and add to the imports map to ensure that references +// within the export bundle to other packages are consistent. +// +// On return, the state of the reader is undefined. +// +// Experimental: This API is experimental and may change in the future. 
+func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) { + data, err := readAll(in) + if err != nil { + return nil, fmt.Errorf("reading export bundle: %v", err) + } + return gcimporter.IImportBundle(fset, imports, data) +} + +// WriteBundle writes encoded type information for the specified packages to out. +// The FileSet provides file position information for named objects. +// +// Experimental: This API is experimental and may change in the future. +func WriteBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { + return gcimporter.IExportBundle(out, fset, pkgs) +} diff --git a/agent/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/agent/vendor/golang.org/x/tools/go/gcexportdata/importer.go new file mode 100644 index 00000000000..37a7247e268 --- /dev/null +++ b/agent/vendor/golang.org/x/tools/go/gcexportdata/importer.go @@ -0,0 +1,75 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcexportdata + +import ( + "fmt" + "go/token" + "go/types" + "os" +) + +// NewImporter returns a new instance of the types.Importer interface +// that reads type information from export data files written by gc. +// The Importer also satisfies types.ImporterFrom. +// +// Export data files are located using "go build" workspace conventions +// and the build.Default context. +// +// Use this importer instead of go/importer.For("gc", ...) to avoid the +// version-skew problems described in the documentation of this package, +// or to control the FileSet or access the imports map populated during +// package loading. +// +// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, +// which is more efficient. +func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom { + return importer{fset, imports} +} + +type importer struct { + fset *token.FileSet + imports map[string]*types.Package +} + +func (imp importer) Import(importPath string) (*types.Package, error) { + return imp.ImportFrom(importPath, "", 0) +} + +func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) { + filename, path := Find(importPath, srcDir) + if filename == "" { + if importPath == "unsafe" { + // Even for unsafe, call Find first in case + // the package was vendored. + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %s", importPath) + } + + if pkg, ok := imp.imports[path]; ok && pkg.Complete() { + return pkg, nil // cache hit + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + f.Close() + if err != nil { + // add file name to error + err = fmt.Errorf("reading export data: %s: %v", filename, err) + } + }() + + r, err := NewReader(f) + if err != nil { + return nil, err + } + + return Read(r, imp.fset, imp.imports, path) +} diff --git a/agent/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/agent/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go new file mode 100644 index 00000000000..18a002f82a1 --- /dev/null +++ b/agent/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -0,0 +1,49 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
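For context on how the gcexportdata API above fits together, here is a minimal sketch (not part of the vendored sources) that locates, extracts, and decodes export data for one import path, essentially what importer.go does internally. The package's own documentation marks Find and NewImporter as deprecated in favor of golang.org/x/tools/go/packages.

package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	// Locate the export data for "fmt" via the go command.
	filename, path := gcexportdata.Find("fmt", ".")
	if filename == "" {
		log.Fatal("no export data found for fmt")
	}

	f, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Extract the export data section and decode it into a *types.Package.
	r, err := gcexportdata.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}
	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), "exports", len(pkg.Scope().Names()), "names")
}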
+ +// Package packagesdriver fetches type sizes for go/packages and go/analysis. +package packagesdriver + +import ( + "context" + "fmt" + "go/types" + "strings" + + "golang.org/x/tools/internal/gocommand" +) + +var debug = false + +func GetSizesGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (types.Sizes, error) { + inv.Verb = "list" + inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} + stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) + var goarch, compiler string + if rawErr != nil { + if rawErrMsg := rawErr.Error(); strings.Contains(rawErrMsg, "cannot find main module") || strings.Contains(rawErrMsg, "go.mod file not found") { + // User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc. + // TODO(matloob): Is this a problem in practice? + inv.Verb = "env" + inv.Args = []string{"GOARCH"} + envout, enverr := gocmdRunner.Run(ctx, inv) + if enverr != nil { + return nil, enverr + } + goarch = strings.TrimSpace(envout.String()) + compiler = "gc" + } else { + return nil, friendlyErr + } + } else { + fields := strings.Fields(stdout.String()) + if len(fields) < 2 { + return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", + stdout.String(), stderr.String()) + } + goarch = fields[0] + compiler = fields[1] + } + return types.SizesFor(compiler, goarch), nil +} diff --git a/agent/vendor/golang.org/x/tools/go/packages/doc.go b/agent/vendor/golang.org/x/tools/go/packages/doc.go new file mode 100644 index 00000000000..da4ab89fe63 --- /dev/null +++ b/agent/vendor/golang.org/x/tools/go/packages/doc.go @@ -0,0 +1,220 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package packages loads Go packages for inspection and analysis. + +The Load function takes as input a list of patterns and return a list of Package +structs describing individual packages matched by those patterns. +The LoadMode controls the amount of detail in the loaded packages. + +Load passes most patterns directly to the underlying build tool, +but all patterns with the prefix "query=", where query is a +non-empty string of letters from [a-z], are reserved and may be +interpreted as query operators. + +Two query operators are currently supported: "file" and "pattern". + +The query "file=path/to/file.go" matches the package or packages enclosing +the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go" +might return the packages "fmt" and "fmt [fmt.test]". + +The query "pattern=string" causes "string" to be passed directly to +the underlying build tool. In most cases this is unnecessary, +but an application can use Load("pattern=" + x) as an escaping mechanism +to ensure that x is not interpreted as a query operator if it contains '='. + +All other query operators are reserved for future use and currently +cause Load to report an error. + +The Package struct provides basic information about the package, including + + - ID, a unique identifier for the package in the returned set; + - GoFiles, the names of the package's Go source files; + - Imports, a map from source import strings to the Packages they name; + - Types, the type information for the package's exported symbols; + - Syntax, the parsed syntax trees for the package's source code; and + - TypeInfo, the result of a complete type-check of the package syntax trees. 
+ +(See the documentation for type Package for the complete list of fields +and more detailed descriptions.) + +For example, + + Load(nil, "bytes", "unicode...") + +returns four Package structs describing the standard library packages +bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern +can match multiple packages and that a package might be matched by +multiple patterns: in general it is not possible to determine which +packages correspond to which patterns. + +Note that the list returned by Load contains only the packages matched +by the patterns. Their dependencies can be found by walking the import +graph using the Imports fields. + +The Load function can be configured by passing a pointer to a Config as +the first argument. A nil Config is equivalent to the zero Config, which +causes Load to run in LoadFiles mode, collecting minimal information. +See the documentation for type Config for details. + +As noted earlier, the Config.Mode controls the amount of detail +reported about the loaded packages. See the documentation for type LoadMode +for details. + +Most tools should pass their command-line arguments (after any flags) +uninterpreted to the loader, so that the loader can interpret them +according to the conventions of the underlying build system. +See the Example function for typical usage. +*/ +package packages // import "golang.org/x/tools/go/packages" + +/* + +Motivation and design considerations + +The new package's design solves problems addressed by two existing +packages: go/build, which locates and describes packages, and +golang.org/x/tools/go/loader, which loads, parses and type-checks them. +The go/build.Package structure encodes too much of the 'go build' way +of organizing projects, leaving us in need of a data type that describes a +package of Go source code independent of the underlying build system. +We wanted something that works equally well with go build and vgo, and +also other build systems such as Bazel and Blaze, making it possible to +construct analysis tools that work in all these environments. +Tools such as errcheck and staticcheck were essentially unavailable to +the Go community at Google, and some of Google's internal tools for Go +are unavailable externally. +This new package provides a uniform way to obtain package metadata by +querying each of these build systems, optionally supporting their +preferred command-line notations for packages, so that tools integrate +neatly with users' build environments. The Metadata query function +executes an external query tool appropriate to the current workspace. + +Loading packages always returns the complete import graph "all the way down", +even if all you want is information about a single package, because the query +mechanisms of all the build systems we currently support ({go,vgo} list, and +blaze/bazel aspect-based query) cannot provide detailed information +about one package without visiting all its dependencies too, so there is +no additional asymptotic cost to providing transitive information. +(This property might not be true of a hypothetical 5th build system.) + +In calls to TypeCheck, all initial packages, and any package that +transitively depends on one of them, must be loaded from source. +Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from +source; D may be loaded from export data, and E may not be loaded at all +(though it's possible that D's export data mentions it, so a +types.Package may be created for it and exposed.) 
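To ground the Load/Config/LoadMode description above, a minimal caller looks roughly like the sketch below (an illustration, not part of the vendored package); the mode bits are the same NeedName/NeedTypes/NeedTypesInfo/NeedSyntax constants that the vendored stringer tool requests.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		// Request names, parsed syntax, and full type information.
		Mode: packages.NeedName | packages.NeedSyntax |
			packages.NeedTypes | packages.NeedTypesInfo,
	}
	pkgs, err := packages.Load(cfg, "bytes", "unicode/utf8")
	if err != nil {
		log.Fatal(err)
	}
	// PrintErrors reports per-package load errors and returns how many it printed.
	if packages.PrintErrors(pkgs) > 0 {
		log.Fatal("packages contain errors")
	}
	for _, pkg := range pkgs {
		fmt.Println(pkg.ID, pkg.Name, len(pkg.Syntax), "files")
	}
}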
+ +The old loader had a feature to suppress type-checking of function +bodies on a per-package basis, primarily intended to reduce the work of +obtaining type information for imported packages. Now that imports are +satisfied by export data, the optimization no longer seems necessary. + +Despite some early attempts, the old loader did not exploit export data, +instead always using the equivalent of WholeProgram mode. This was due +to the complexity of mixing source and export data packages (now +resolved by the upward traversal mentioned above), and because export data +files were nearly always missing or stale. Now that 'go build' supports +caching, all the underlying build systems can guarantee to produce +export data in a reasonable (amortized) time. + +Test "main" packages synthesized by the build system are now reported as +first-class packages, avoiding the need for clients (such as go/ssa) to +reinvent this generation logic. + +One way in which go/packages is simpler than the old loader is in its +treatment of in-package tests. In-package tests are packages that +consist of all the files of the library under test, plus the test files. +The old loader constructed in-package tests by a two-phase process of +mutation called "augmentation": first it would construct and type check +all the ordinary library packages and type-check the packages that +depend on them; then it would add more (test) files to the package and +type-check again. This two-phase approach had four major problems: +1) in processing the tests, the loader modified the library package, + leaving no way for a client application to see both the test + package and the library package; one would mutate into the other. +2) because test files can declare additional methods on types defined in + the library portion of the package, the dispatch of method calls in + the library portion was affected by the presence of the test files. + This should have been a clue that the packages were logically + different. +3) this model of "augmentation" assumed at most one in-package test + per library package, which is true of projects using 'go build', + but not other build systems. +4) because of the two-phase nature of test processing, all packages that + import the library package had to be processed before augmentation, + forcing a "one-shot" API and preventing the client from calling Load + in several times in sequence as is now possible in WholeProgram mode. + (TypeCheck mode has a similar one-shot restriction for a different reason.) + +Early drafts of this package supported "multi-shot" operation. +Although it allowed clients to make a sequence of calls (or concurrent +calls) to Load, building up the graph of Packages incrementally, +it was of marginal value: it complicated the API +(since it allowed some options to vary across calls but not others), +it complicated the implementation, +it cannot be made to work in Types mode, as explained above, +and it was less efficient than making one combined call (when this is possible). +Among the clients we have inspected, none made multiple calls to load +but could not be easily and satisfactorily modified to make only a single call. +However, applications changes may be required. +For example, the ssadump command loads the user-specified packages +and in addition the runtime package. It is tempting to simply append +"runtime" to the user-provided list, but that does not work if the user +specified an ad-hoc package such as [a.go b.go]. 
+Instead, ssadump no longer requests the runtime package, +but seeks it among the dependencies of the user-specified packages, +and emits an error if it is not found. + +Overlays: The Overlay field in the Config allows providing alternate contents +for Go source files, by providing a mapping from file path to contents. +go/packages will pull in new imports added in overlay files when go/packages +is run in LoadImports mode or greater. +Overlay support for the go list driver isn't complete yet: if the file doesn't +exist on disk, it will only be recognized in an overlay if it is a non-test file +and the package would be reported even without the overlay. + +Questions & Tasks + +- Add GOARCH/GOOS? + They are not portable concepts, but could be made portable. + Our goal has been to allow users to express themselves using the conventions + of the underlying build system: if the build system honors GOARCH + during a build and during a metadata query, then so should + applications built atop that query mechanism. + Conversely, if the target architecture of the build is determined by + command-line flags, the application can pass the relevant + flags through to the build system using a command such as: + myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin" + However, this approach is low-level, unwieldy, and non-portable. + GOOS and GOARCH seem important enough to warrant a dedicated option. + +- How should we handle partial failures such as a mixture of good and + malformed patterns, existing and non-existent packages, successful and + failed builds, import failures, import cycles, and so on, in a call to + Load? + +- Support bazel, blaze, and go1.10 list, not just go1.11 list. + +- Handle (and test) various partial success cases, e.g. + a mixture of good packages and: + invalid patterns + nonexistent packages + empty packages + packages with malformed package or import declarations + unreadable files + import cycles + other parse errors + type errors + Make sure we record errors at the correct place in the graph. + +- Missing packages among initial arguments are not reported. + Return bogus packages for them, like golist does. + +- "undeclared name" errors (for example) are reported out of source file + order. I suspect this is due to the breadth-first resolution now used + by go/types. Is that a bug? Discuss with gri. + +*/ diff --git a/agent/vendor/golang.org/x/tools/go/packages/external.go b/agent/vendor/golang.org/x/tools/go/packages/external.go new file mode 100644 index 00000000000..7242a0a7d2b --- /dev/null +++ b/agent/vendor/golang.org/x/tools/go/packages/external.go @@ -0,0 +1,101 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file enables an external tool to intercept package requests. +// If the tool is present then its results are used in preference to +// the go list command. + +package packages + +import ( + "bytes" + "encoding/json" + "fmt" + exec "golang.org/x/sys/execabs" + "os" + "strings" +) + +// The Driver Protocol +// +// The driver, given the inputs to a call to Load, returns metadata about the packages specified. +// This allows for different build systems to support go/packages by telling go/packages how the +// packages' source is organized. +// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in +// the path as gopackagesdriver. It's given the inputs to load in its argv. 
See the package +// documentation in doc.go for the full description of the patterns that need to be supported. +// A driver receives as a JSON-serialized driverRequest struct in standard input and will +// produce a JSON-serialized driverResponse (see definition in packages.go) in its standard output. + +// driverRequest is used to provide the portion of Load's Config that is needed by a driver. +type driverRequest struct { + Mode LoadMode `json:"mode"` + // Env specifies the environment the underlying build system should be run in. + Env []string `json:"env"` + // BuildFlags are flags that should be passed to the underlying build system. + BuildFlags []string `json:"build_flags"` + // Tests specifies whether the patterns should also return test packages. + Tests bool `json:"tests"` + // Overlay maps file paths (relative to the driver's working directory) to the byte contents + // of overlay files. + Overlay map[string][]byte `json:"overlay"` +} + +// findExternalDriver returns the file path of a tool that supplies +// the build system package structure, or "" if not found." +// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its +// value, otherwise it searches for a binary named gopackagesdriver on the PATH. +func findExternalDriver(cfg *Config) driver { + const toolPrefix = "GOPACKAGESDRIVER=" + tool := "" + for _, env := range cfg.Env { + if val := strings.TrimPrefix(env, toolPrefix); val != env { + tool = val + } + } + if tool != "" && tool == "off" { + return nil + } + if tool == "" { + var err error + tool, err = exec.LookPath("gopackagesdriver") + if err != nil { + return nil + } + } + return func(cfg *Config, words ...string) (*driverResponse, error) { + req, err := json.Marshal(driverRequest{ + Mode: cfg.Mode, + Env: cfg.Env, + BuildFlags: cfg.BuildFlags, + Tests: cfg.Tests, + Overlay: cfg.Overlay, + }) + if err != nil { + return nil, fmt.Errorf("failed to encode message to driver tool: %v", err) + } + + buf := new(bytes.Buffer) + stderr := new(bytes.Buffer) + cmd := exec.CommandContext(cfg.Context, tool, words...) + cmd.Dir = cfg.Dir + cmd.Env = cfg.Env + cmd.Stdin = bytes.NewReader(req) + cmd.Stdout = buf + cmd.Stderr = stderr + + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr) + } + if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" { + fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr) + } + + var response driverResponse + if err := json.Unmarshal(buf.Bytes(), &response); err != nil { + return nil, err + } + return &response, nil + } +} diff --git a/agent/vendor/golang.org/x/tools/go/packages/golist.go b/agent/vendor/golang.org/x/tools/go/packages/golist.go new file mode 100644 index 00000000000..58230038a7c --- /dev/null +++ b/agent/vendor/golang.org/x/tools/go/packages/golist.go @@ -0,0 +1,1183 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "go/types" + "io/ioutil" + "log" + "os" + "path" + "path/filepath" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "unicode" + + exec "golang.org/x/sys/execabs" + "golang.org/x/tools/go/internal/packagesdriver" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/packagesinternal" +) + +// debug controls verbose logging. 
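// The driver selection described in external.go above is controlled through
// the environment: if GOPACKAGESDRIVER names a binary it is consulted first,
// and the value "off" forces the go list driver implemented in this file.
// A caller can pin that per Config; a small sketch, not part of the vendored
// code (Config.Env replaces the process environment, hence the append):
//
//	cfg := &packages.Config{
//		Mode: packages.NeedName,
//		// Ignore any gopackagesdriver on PATH and use `go list` directly.
//		Env: append(os.Environ(), "GOPACKAGESDRIVER=off"),
//	}
//	pkgs, err := packages.Load(cfg, "./...")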
+var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG")) + +// A goTooOldError reports that the go command +// found by exec.LookPath is too old to use the new go list behavior. +type goTooOldError struct { + error +} + +// responseDeduper wraps a driverResponse, deduplicating its contents. +type responseDeduper struct { + seenRoots map[string]bool + seenPackages map[string]*Package + dr *driverResponse +} + +func newDeduper() *responseDeduper { + return &responseDeduper{ + dr: &driverResponse{}, + seenRoots: map[string]bool{}, + seenPackages: map[string]*Package{}, + } +} + +// addAll fills in r with a driverResponse. +func (r *responseDeduper) addAll(dr *driverResponse) { + for _, pkg := range dr.Packages { + r.addPackage(pkg) + } + for _, root := range dr.Roots { + r.addRoot(root) + } + r.dr.GoVersion = dr.GoVersion +} + +func (r *responseDeduper) addPackage(p *Package) { + if r.seenPackages[p.ID] != nil { + return + } + r.seenPackages[p.ID] = p + r.dr.Packages = append(r.dr.Packages, p) +} + +func (r *responseDeduper) addRoot(id string) { + if r.seenRoots[id] { + return + } + r.seenRoots[id] = true + r.dr.Roots = append(r.dr.Roots, id) +} + +type golistState struct { + cfg *Config + ctx context.Context + + envOnce sync.Once + goEnvError error + goEnv map[string]string + + rootsOnce sync.Once + rootDirsError error + rootDirs map[string]string + + goVersionOnce sync.Once + goVersionError error + goVersion int // The X in Go 1.X. + + // vendorDirs caches the (non)existence of vendor directories. + vendorDirs map[string]bool +} + +// getEnv returns Go environment variables. Only specific variables are +// populated -- computing all of them is slow. +func (state *golistState) getEnv() (map[string]string, error) { + state.envOnce.Do(func() { + var b *bytes.Buffer + b, state.goEnvError = state.invokeGo("env", "-json", "GOMOD", "GOPATH") + if state.goEnvError != nil { + return + } + + state.goEnv = make(map[string]string) + decoder := json.NewDecoder(b) + if state.goEnvError = decoder.Decode(&state.goEnv); state.goEnvError != nil { + return + } + }) + return state.goEnv, state.goEnvError +} + +// mustGetEnv is a convenience function that can be used if getEnv has already succeeded. +func (state *golistState) mustGetEnv() map[string]string { + env, err := state.getEnv() + if err != nil { + panic(fmt.Sprintf("mustGetEnv: %v", err)) + } + return env +} + +// goListDriver uses the go list command to interpret the patterns and produce +// the build system package structure. +// See driver for more details. +func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { + // Make sure that any asynchronous go commands are killed when we return. + parentCtx := cfg.Context + if parentCtx == nil { + parentCtx = context.Background() + } + ctx, cancel := context.WithCancel(parentCtx) + defer cancel() + + response := newDeduper() + + state := &golistState{ + cfg: cfg, + ctx: ctx, + vendorDirs: map[string]bool{}, + } + + // Fill in response.Sizes asynchronously if necessary. + var sizeserr error + var sizeswg sync.WaitGroup + if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { + sizeswg.Add(1) + go func() { + var sizes types.Sizes + sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) + // types.SizesFor always returns nil or a *types.StdSizes. 
+ response.dr.Sizes, _ = sizes.(*types.StdSizes) + sizeswg.Done() + }() + } + + // Determine files requested in contains patterns + var containFiles []string + restPatterns := make([]string, 0, len(patterns)) + // Extract file= and other [querytype]= patterns. Report an error if querytype + // doesn't exist. +extractQueries: + for _, pattern := range patterns { + eqidx := strings.Index(pattern, "=") + if eqidx < 0 { + restPatterns = append(restPatterns, pattern) + } else { + query, value := pattern[:eqidx], pattern[eqidx+len("="):] + switch query { + case "file": + containFiles = append(containFiles, value) + case "pattern": + restPatterns = append(restPatterns, value) + case "": // not a reserved query + restPatterns = append(restPatterns, pattern) + default: + for _, rune := range query { + if rune < 'a' || rune > 'z' { // not a reserved query + restPatterns = append(restPatterns, pattern) + continue extractQueries + } + } + // Reject all other patterns containing "=" + return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern) + } + } + } + + // See if we have any patterns to pass through to go list. Zero initial + // patterns also requires a go list call, since it's the equivalent of + // ".". + if len(restPatterns) > 0 || len(patterns) == 0 { + dr, err := state.createDriverResponse(restPatterns...) + if err != nil { + return nil, err + } + response.addAll(dr) + } + + if len(containFiles) != 0 { + if err := state.runContainsQueries(response, containFiles); err != nil { + return nil, err + } + } + + // Only use go/packages' overlay processing if we're using a Go version + // below 1.16. Otherwise, go list handles it. + if goVersion, err := state.getGoVersion(); err == nil && goVersion < 16 { + modifiedPkgs, needPkgs, err := state.processGolistOverlay(response) + if err != nil { + return nil, err + } + + var containsCandidates []string + if len(containFiles) > 0 { + containsCandidates = append(containsCandidates, modifiedPkgs...) + containsCandidates = append(containsCandidates, needPkgs...) + } + if err := state.addNeededOverlayPackages(response, needPkgs); err != nil { + return nil, err + } + // Check candidate packages for containFiles. + if len(containFiles) > 0 { + for _, id := range containsCandidates { + pkg, ok := response.seenPackages[id] + if !ok { + response.addPackage(&Package{ + ID: id, + Errors: []Error{{ + Kind: ListError, + Msg: fmt.Sprintf("package %s expected but not seen", id), + }}, + }) + continue + } + for _, f := range containFiles { + for _, g := range pkg.GoFiles { + if sameFile(f, g) { + response.addRoot(id) + } + } + } + } + } + // Add root for any package that matches a pattern. This applies only to + // packages that are modified by overlays, since they are not added as + // roots automatically. + for _, pattern := range restPatterns { + match := matchPattern(pattern) + for _, pkgID := range modifiedPkgs { + pkg, ok := response.seenPackages[pkgID] + if !ok { + continue + } + if match(pkg.PkgPath) { + response.addRoot(pkg.ID) + } + } + } + } + + sizeswg.Wait() + if sizeserr != nil { + return nil, sizeserr + } + return response.dr, nil +} + +func (state *golistState) addNeededOverlayPackages(response *responseDeduper, pkgs []string) error { + if len(pkgs) == 0 { + return nil + } + dr, err := state.createDriverResponse(pkgs...) 
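// Hedged sketch of the "file=" contains query that goListDriver above routes to
// runContainsQueries: the returned roots are the packages whose GoFiles include
// the named file. If the file lies outside any package, the ad-hoc fallback
// described next is used instead.

import "golang.org/x/tools/go/packages"

// packageContaining loads the package(s) whose GoFiles include goFile.
func packageContaining(goFile string) ([]*packages.Package, error) {
    cfg := &packages.Config{Mode: packages.NeedName | packages.NeedFiles}
    return packages.Load(cfg, "file="+goFile)
}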
+ if err != nil { + return err + } + for _, pkg := range dr.Packages { + response.addPackage(pkg) + } + _, needPkgs, err := state.processGolistOverlay(response) + if err != nil { + return err + } + return state.addNeededOverlayPackages(response, needPkgs) +} + +func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error { + for _, query := range queries { + // TODO(matloob): Do only one query per directory. + fdir := filepath.Dir(query) + // Pass absolute path of directory to go list so that it knows to treat it as a directory, + // not a package path. + pattern, err := filepath.Abs(fdir) + if err != nil { + return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) + } + dirResponse, err := state.createDriverResponse(pattern) + + // If there was an error loading the package, or no packages are returned, + // or the package is returned with errors, try to load the file as an + // ad-hoc package. + // Usually the error will appear in a returned package, but may not if we're + // in module mode and the ad-hoc is located outside a module. + if err != nil || len(dirResponse.Packages) == 0 || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && + len(dirResponse.Packages[0].Errors) == 1 { + var queryErr error + if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil { + return err // return the original error + } + } + isRoot := make(map[string]bool, len(dirResponse.Roots)) + for _, root := range dirResponse.Roots { + isRoot[root] = true + } + for _, pkg := range dirResponse.Packages { + // Add any new packages to the main set + // We don't bother to filter packages that will be dropped by the changes of roots, + // that will happen anyway during graph construction outside this function. + // Over-reporting packages is not a problem. + response.addPackage(pkg) + // if the package was not a root one, it cannot have the file + if !isRoot[pkg.ID] { + continue + } + for _, pkgFile := range pkg.GoFiles { + if filepath.Base(query) == filepath.Base(pkgFile) { + response.addRoot(pkg.ID) + break + } + } + } + } + return nil +} + +// adhocPackage attempts to load or construct an ad-hoc package for a given +// query, if the original call to the driver produced inadequate results. +func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) { + response, err := state.createDriverResponse(query) + if err != nil { + return nil, err + } + // If we get nothing back from `go list`, + // try to make this file into its own ad-hoc package. + // TODO(rstambler): Should this check against the original response? + if len(response.Packages) == 0 { + response.Packages = append(response.Packages, &Package{ + ID: "command-line-arguments", + PkgPath: query, + GoFiles: []string{query}, + CompiledGoFiles: []string{query}, + Imports: make(map[string]*Package), + }) + response.Roots = append(response.Roots, "command-line-arguments") + } + // Handle special cases. + if len(response.Packages) == 1 { + // golang/go#33482: If this is a file= query for ad-hoc packages where + // the file only exists on an overlay, and exists outside of a module, + // add the file to the package and remove the errors. 
+ if response.Packages[0].ID == "command-line-arguments" || + filepath.ToSlash(response.Packages[0].PkgPath) == filepath.ToSlash(query) { + if len(response.Packages[0].GoFiles) == 0 { + filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath + // TODO(matloob): check if the file is outside of a root dir? + for path := range state.cfg.Overlay { + if path == filename { + response.Packages[0].Errors = nil + response.Packages[0].GoFiles = []string{path} + response.Packages[0].CompiledGoFiles = []string{path} + } + } + } + } + } + return response, nil +} + +// Fields must match go list; +// see $GOROOT/src/cmd/go/internal/load/pkg.go. +type jsonPackage struct { + ImportPath string + Dir string + Name string + Export string + GoFiles []string + CompiledGoFiles []string + IgnoredGoFiles []string + IgnoredOtherFiles []string + EmbedPatterns []string + EmbedFiles []string + CFiles []string + CgoFiles []string + CXXFiles []string + MFiles []string + HFiles []string + FFiles []string + SFiles []string + SwigFiles []string + SwigCXXFiles []string + SysoFiles []string + Imports []string + ImportMap map[string]string + Deps []string + Module *Module + TestGoFiles []string + TestImports []string + XTestGoFiles []string + XTestImports []string + ForTest string // q in a "p [q.test]" package, else "" + DepOnly bool + + Error *packagesinternal.PackageError + DepsErrors []*packagesinternal.PackageError +} + +type jsonPackageError struct { + ImportStack []string + Pos string + Err string +} + +func otherFiles(p *jsonPackage) [][]string { + return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} +} + +// createDriverResponse uses the "go list" command to expand the pattern +// words and return a response for the specified packages. +func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) { + // go list uses the following identifiers in ImportPath and Imports: + // + // "p" -- importable package or main (command) + // "q.test" -- q's test executable + // "p [q.test]" -- variant of p as built for q's test executable + // "q_test [q.test]" -- q's external test package + // + // The packages p that are built differently for a test q.test + // are q itself, plus any helpers used by the external test q_test, + // typically including "testing" and all its dependencies. + + // Run "go list" for complete + // information on the specified packages. + goVersion, err := state.getGoVersion() + if err != nil { + return nil, err + } + buf, err := state.invokeGo("list", golistargs(state.cfg, words, goVersion)...) + if err != nil { + return nil, err + } + + seen := make(map[string]*jsonPackage) + pkgs := make(map[string]*Package) + additionalErrors := make(map[string][]Error) + // Decode the JSON and convert it to Package form. + response := &driverResponse{ + GoVersion: goVersion, + } + for dec := json.NewDecoder(buf); dec.More(); { + p := new(jsonPackage) + if err := dec.Decode(p); err != nil { + return nil, fmt.Errorf("JSON decoding failed: %v", err) + } + + if p.ImportPath == "" { + // The documentation for go list says that “[e]rroneous packages will have + // a non-empty ImportPath”. If for some reason it comes back empty, we + // prefer to error out rather than silently discarding data or handing + // back a package without any way to refer to it. 
+ if p.Error != nil { + return nil, Error{ + Pos: p.Error.Pos, + Msg: p.Error.Err, + } + } + return nil, fmt.Errorf("package missing import path: %+v", p) + } + + // Work around https://golang.org/issue/33157: + // go list -e, when given an absolute path, will find the package contained at + // that directory. But when no package exists there, it will return a fake package + // with an error and the ImportPath set to the absolute path provided to go list. + // Try to convert that absolute path to what its package path would be if it's + // contained in a known module or GOPATH entry. This will allow the package to be + // properly "reclaimed" when overlays are processed. + if filepath.IsAbs(p.ImportPath) && p.Error != nil { + pkgPath, ok, err := state.getPkgPath(p.ImportPath) + if err != nil { + return nil, err + } + if ok { + p.ImportPath = pkgPath + } + } + + if old, found := seen[p.ImportPath]; found { + // If one version of the package has an error, and the other doesn't, assume + // that this is a case where go list is reporting a fake dependency variant + // of the imported package: When a package tries to invalidly import another + // package, go list emits a variant of the imported package (with the same + // import path, but with an error on it, and the package will have a + // DepError set on it). An example of when this can happen is for imports of + // main packages: main packages can not be imported, but they may be + // separately matched and listed by another pattern. + // See golang.org/issue/36188 for more details. + + // The plan is that eventually, hopefully in Go 1.15, the error will be + // reported on the importing package rather than the duplicate "fake" + // version of the imported package. Once all supported versions of Go + // have the new behavior this logic can be deleted. + // TODO(matloob): delete the workaround logic once all supported versions of + // Go return the errors on the proper package. + + // There should be exactly one version of a package that doesn't have an + // error. + if old.Error == nil && p.Error == nil { + if !reflect.DeepEqual(p, old) { + return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) + } + continue + } + + // Determine if this package's error needs to be bubbled up. + // This is a hack, and we expect for go list to eventually set the error + // on the package. + if old.Error != nil { + var errkind string + if strings.Contains(old.Error.Err, "not an importable package") { + errkind = "not an importable package" + } else if strings.Contains(old.Error.Err, "use of internal package") && strings.Contains(old.Error.Err, "not allowed") { + errkind = "use of internal package not allowed" + } + if errkind != "" { + if len(old.Error.ImportStack) < 1 { + return nil, fmt.Errorf(`internal error: go list gave a %q error with empty import stack`, errkind) + } + importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-1] + if importingPkg == old.ImportPath { + // Using an older version of Go which put this package itself on top of import + // stack, instead of the importer. Look for importer in second from top + // position. 
+ if len(old.Error.ImportStack) < 2 { + return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack without importing package`, errkind) + } + importingPkg = old.Error.ImportStack[len(old.Error.ImportStack)-2] + } + additionalErrors[importingPkg] = append(additionalErrors[importingPkg], Error{ + Pos: old.Error.Pos, + Msg: old.Error.Err, + Kind: ListError, + }) + } + } + + // Make sure that if there's a version of the package without an error, + // that's the one reported to the user. + if old.Error == nil { + continue + } + + // This package will replace the old one at the end of the loop. + } + seen[p.ImportPath] = p + + pkg := &Package{ + Name: p.Name, + ID: p.ImportPath, + GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), + CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), + OtherFiles: absJoin(p.Dir, otherFiles(p)...), + EmbedFiles: absJoin(p.Dir, p.EmbedFiles), + EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns), + IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles), + forTest: p.ForTest, + depsErrors: p.DepsErrors, + Module: p.Module, + } + + if (state.cfg.Mode&typecheckCgo) != 0 && len(p.CgoFiles) != 0 { + if len(p.CompiledGoFiles) > len(p.GoFiles) { + // We need the cgo definitions, which are in the first + // CompiledGoFile after the non-cgo ones. This is a hack but there + // isn't currently a better way to find it. We also need the pure + // Go files and unprocessed cgo files, all of which are already + // in pkg.GoFiles. + cgoTypes := p.CompiledGoFiles[len(p.GoFiles)] + pkg.CompiledGoFiles = append([]string{cgoTypes}, pkg.GoFiles...) + } else { + // golang/go#38990: go list silently fails to do cgo processing + pkg.CompiledGoFiles = nil + pkg.Errors = append(pkg.Errors, Error{ + Msg: "go list failed to return CompiledGoFiles. This may indicate failure to perform cgo processing; try building at the command line. See https://golang.org/issue/38990.", + Kind: ListError, + }) + } + } + + // Work around https://golang.org/issue/28749: + // cmd/go puts assembly, C, and C++ files in CompiledGoFiles. + // Remove files from CompiledGoFiles that are non-go files + // (or are not files that look like they are from the cache). + if len(pkg.CompiledGoFiles) > 0 { + out := pkg.CompiledGoFiles[:0] + for _, f := range pkg.CompiledGoFiles { + if ext := filepath.Ext(f); ext != ".go" && ext != "" { // ext == "" means the file is from the cache, so probably cgo-processed file + continue + } + out = append(out, f) + } + pkg.CompiledGoFiles = out + } + + // Extract the PkgPath from the package's ID. + if i := strings.IndexByte(pkg.ID, ' '); i >= 0 { + pkg.PkgPath = pkg.ID[:i] + } else { + pkg.PkgPath = pkg.ID + } + + if pkg.PkgPath == "unsafe" { + pkg.CompiledGoFiles = nil // ignore fake unsafe.go file (#59929) + } else if len(pkg.CompiledGoFiles) == 0 { + // Work around for pre-go.1.11 versions of go list. + // TODO(matloob): they should be handled by the fallback. + // Can we delete this? + pkg.CompiledGoFiles = pkg.GoFiles + } + + // Assume go list emits only absolute paths for Dir. + if p.Dir != "" && !filepath.IsAbs(p.Dir) { + log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir) + } + + if p.Export != "" && !filepath.IsAbs(p.Export) { + pkg.ExportFile = filepath.Join(p.Dir, p.Export) + } else { + pkg.ExportFile = p.Export + } + + // imports + // + // Imports contains the IDs of all imported packages. + // ImportsMap records (path, ID) only where they differ. 
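// Hedged sketch of how the ID forms documented above ("p", "q.test",
// "p [q.test]", "q_test [q.test]") surface through the public API, and of the
// PkgPath-from-ID split performed just above. The module path example.com/m is
// a placeholder and the listed output is only typical.

import (
    "fmt"
    "log"

    "golang.org/x/tools/go/packages"
)

func listTestVariants() {
    cfg := &packages.Config{Mode: packages.NeedName, Tests: true}
    pkgs, err := packages.Load(cfg, "example.com/m")
    if err != nil {
        log.Fatal(err)
    }
    for _, p := range pkgs {
        // Typical output (ID on the left, PkgPath on the right):
        //   example.com/m                            example.com/m
        //   example.com/m [example.com/m.test]       example.com/m
        //   example.com/m.test                       example.com/m.test
        //   example.com/m_test [example.com/m.test]  example.com/m_test
        fmt.Printf("%-45s %s\n", p.ID, p.PkgPath)
    }
}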
+ ids := make(map[string]bool) + for _, id := range p.Imports { + ids[id] = true + } + pkg.Imports = make(map[string]*Package) + for path, id := range p.ImportMap { + pkg.Imports[path] = &Package{ID: id} // non-identity import + delete(ids, id) + } + for id := range ids { + if id == "C" { + continue + } + + pkg.Imports[id] = &Package{ID: id} // identity import + } + if !p.DepOnly { + response.Roots = append(response.Roots, pkg.ID) + } + + // Temporary work-around for golang/go#39986. Parse filenames out of + // error messages. This happens if there are unrecoverable syntax + // errors in the source, so we can't match on a specific error message. + // + // TODO(rfindley): remove this heuristic, in favor of considering + // InvalidGoFiles from the list driver. + if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) { + addFilenameFromPos := func(pos string) bool { + split := strings.Split(pos, ":") + if len(split) < 1 { + return false + } + filename := strings.TrimSpace(split[0]) + if filename == "" { + return false + } + if !filepath.IsAbs(filename) { + filename = filepath.Join(state.cfg.Dir, filename) + } + info, _ := os.Stat(filename) + if info == nil { + return false + } + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename) + pkg.GoFiles = append(pkg.GoFiles, filename) + return true + } + found := addFilenameFromPos(err.Pos) + // In some cases, go list only reports the error position in the + // error text, not the error position. One such case is when the + // file's package name is a keyword (see golang.org/issue/39763). + if !found { + addFilenameFromPos(err.Err) + } + } + + if p.Error != nil { + msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363. + // Address golang.org/issue/35964 by appending import stack to error message. + if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 { + msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack) + } + pkg.Errors = append(pkg.Errors, Error{ + Pos: p.Error.Pos, + Msg: msg, + Kind: ListError, + }) + } + + pkgs[pkg.ID] = pkg + } + + for id, errs := range additionalErrors { + if p, ok := pkgs[id]; ok { + p.Errors = append(p.Errors, errs...) + } + } + for _, pkg := range pkgs { + response.Packages = append(response.Packages, pkg) + } + sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID }) + + return response, nil +} + +func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool { + if len(p.GoFiles) > 0 || len(p.CompiledGoFiles) > 0 { + return false + } + + goV, err := state.getGoVersion() + if err != nil { + return false + } + + // On Go 1.14 and earlier, only add filenames from errors if the import stack is empty. + // The import stack behaves differently for these versions than newer Go versions. + if goV < 15 { + return len(p.Error.ImportStack) == 0 + } + + // On Go 1.15 and later, only parse filenames out of error if there's no import stack, + // or the current package is at the top of the import stack. This is not guaranteed + // to work perfectly, but should avoid some cases where files in errors don't belong to this + // package. + return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath +} + +// getGoVersion returns the effective minor version of the go command. 
+func (state *golistState) getGoVersion() (int, error) { + state.goVersionOnce.Do(func() { + state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner) + }) + return state.goVersion, state.goVersionError +} + +// getPkgPath finds the package path of a directory if it's relative to a root +// directory. +func (state *golistState) getPkgPath(dir string) (string, bool, error) { + absDir, err := filepath.Abs(dir) + if err != nil { + return "", false, err + } + roots, err := state.determineRootDirs() + if err != nil { + return "", false, err + } + + for rdir, rpath := range roots { + // Make sure that the directory is in the module, + // to avoid creating a path relative to another module. + if !strings.HasPrefix(absDir, rdir) { + continue + } + // TODO(matloob): This doesn't properly handle symlinks. + r, err := filepath.Rel(rdir, dir) + if err != nil { + continue + } + if rpath != "" { + // We choose only one root even though the directory even it can belong in multiple modules + // or GOPATH entries. This is okay because we only need to work with absolute dirs when a + // file is missing from disk, for instance when gopls calls go/packages in an overlay. + // Once the file is saved, gopls, or the next invocation of the tool will get the correct + // result straight from golist. + // TODO(matloob): Implement module tiebreaking? + return path.Join(rpath, filepath.ToSlash(r)), true, nil + } + return filepath.ToSlash(r), true, nil + } + return "", false, nil +} + +// absJoin absolutizes and flattens the lists of files. +func absJoin(dir string, fileses ...[]string) (res []string) { + for _, files := range fileses { + for _, file := range files { + if !filepath.IsAbs(file) { + file = filepath.Join(dir, file) + } + res = append(res, file) + } + } + return res +} + +func jsonFlag(cfg *Config, goVersion int) string { + if goVersion < 19 { + return "-json" + } + var fields []string + added := make(map[string]bool) + addFields := func(fs ...string) { + for _, f := range fs { + if !added[f] { + added[f] = true + fields = append(fields, f) + } + } + } + addFields("Name", "ImportPath", "Error") // These fields are always needed + if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 { + addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles", + "CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles", + "SwigFiles", "SwigCXXFiles", "SysoFiles") + if cfg.Tests { + addFields("TestGoFiles", "XTestGoFiles") + } + } + if cfg.Mode&NeedTypes != 0 { + // CompiledGoFiles seems to be required for the test case TestCgoNoSyntax, + // even when -compiled isn't passed in. + // TODO(#52435): Should we make the test ask for -compiled, or automatically + // request CompiledGoFiles in certain circumstances? + addFields("Dir", "CompiledGoFiles") + } + if cfg.Mode&NeedCompiledGoFiles != 0 { + addFields("Dir", "CompiledGoFiles", "Export") + } + if cfg.Mode&NeedImports != 0 { + // When imports are requested, DepOnly is used to distinguish between packages + // explicitly requested and transitive imports of those packages. + addFields("DepOnly", "Imports", "ImportMap") + if cfg.Tests { + addFields("TestImports", "XTestImports") + } + } + if cfg.Mode&NeedDeps != 0 { + addFields("DepOnly") + } + if usesExportData(cfg) { + // Request Dir in the unlikely case Export is not absolute. 
+ addFields("Dir", "Export") + } + if cfg.Mode&needInternalForTest != 0 { + addFields("ForTest") + } + if cfg.Mode&needInternalDepsErrors != 0 { + addFields("DepsErrors") + } + if cfg.Mode&NeedModule != 0 { + addFields("Module") + } + if cfg.Mode&NeedEmbedFiles != 0 { + addFields("EmbedFiles") + } + if cfg.Mode&NeedEmbedPatterns != 0 { + addFields("EmbedPatterns") + } + return "-json=" + strings.Join(fields, ",") +} + +func golistargs(cfg *Config, words []string, goVersion int) []string { + const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo + fullargs := []string{ + "-e", jsonFlag(cfg, goVersion), + fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0), + fmt.Sprintf("-test=%t", cfg.Tests), + fmt.Sprintf("-export=%t", usesExportData(cfg)), + fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0), + // go list doesn't let you pass -test and -find together, + // probably because you'd just get the TestMain. + fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)), + } + + // golang/go#60456: with go1.21 and later, go list serves pgo variants, which + // can be costly to compute and may result in redundant processing for the + // caller. Disable these variants. If someone wants to add e.g. a NeedPGO + // mode flag, that should be a separate proposal. + if goVersion >= 21 { + fullargs = append(fullargs, "-pgo=off") + } + + fullargs = append(fullargs, cfg.BuildFlags...) + fullargs = append(fullargs, "--") + fullargs = append(fullargs, words...) + return fullargs +} + +// cfgInvocation returns an Invocation that reflects cfg's settings. +func (state *golistState) cfgInvocation() gocommand.Invocation { + cfg := state.cfg + return gocommand.Invocation{ + BuildFlags: cfg.BuildFlags, + ModFile: cfg.modFile, + ModFlag: cfg.modFlag, + CleanEnv: cfg.Env != nil, + Env: cfg.Env, + Logf: cfg.Logf, + WorkingDir: cfg.Dir, + } +} + +// invokeGo returns the stdout of a go command invocation. +func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) { + cfg := state.cfg + + inv := state.cfgInvocation() + + // For Go versions 1.16 and above, `go list` accepts overlays directly via + // the -overlay flag. Set it, if it's available. + // + // The check for "list" is not necessarily required, but we should avoid + // getting the go version if possible. + if verb == "list" { + goVersion, err := state.getGoVersion() + if err != nil { + return nil, err + } + if goVersion >= 16 { + filename, cleanup, err := state.writeOverlays() + if err != nil { + return nil, err + } + defer cleanup() + inv.Overlay = filename + } + } + inv.Verb = verb + inv.Args = args + gocmdRunner := cfg.gocmdRunner + if gocmdRunner == nil { + gocmdRunner = &gocommand.Runner{} + } + stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv) + if err != nil { + // Check for 'go' executable not being found. + if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound) + } + + exitErr, ok := err.(*exec.ExitError) + if !ok { + // Catastrophic error: + // - context cancellation + return nil, fmt.Errorf("couldn't run 'go': %w", err) + } + + // Old go version? 
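// For orientation, a hedged example of the invocation that jsonFlag and
// golistargs above assemble for a Go 1.19+ toolchain with
// Mode = NeedName|NeedFiles, Tests = false, and no extra build flags (field
// list abbreviated; treat it as approximate):
//
//    go list -e -json=Name,ImportPath,Error,Dir,GoFiles,... -compiled=false
//        -test=false -export=false -deps=false -find=true -- ./...
//
// The commands actually run can be observed by supplying Config.Logf, which
// cfgInvocation above forwards to the go command runner for its debug logging:

import (
    "log"

    "golang.org/x/tools/go/packages"
)

func loadWithLogging(patterns ...string) ([]*packages.Package, error) {
    cfg := &packages.Config{
        Mode: packages.NeedName | packages.NeedFiles,
        Logf: log.Printf, // receives the driver's debug logging
    }
    return packages.Load(cfg, patterns...)
}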
+ if strings.Contains(stderr.String(), "flag provided but not defined") { + return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)} + } + + // Related to #24854 + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "unexpected directory layout") { + return nil, friendlyErr + } + + // Is there an error running the C compiler in cgo? This will be reported in the "Error" field + // and should be suppressed by go list -e. + // + // This condition is not perfect yet because the error message can include other error messages than runtime/cgo. + isPkgPathRune := func(r rune) bool { + // From https://golang.org/ref/spec#Import_declarations: + // Implementation restriction: A compiler may restrict ImportPaths to non-empty strings + // using only characters belonging to Unicode's L, M, N, P, and S general categories + // (the Graphic characters without spaces) and may also exclude the + // characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD. + return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) && + !strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r) + } + // golang/go#36770: Handle case where cmd/go prints module download messages before the error. + msg := stderr.String() + for strings.HasPrefix(msg, "go: downloading") { + msg = msg[strings.IndexRune(msg, '\n')+1:] + } + if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") { + msg := msg[len("# "):] + if strings.HasPrefix(strings.TrimLeftFunc(msg, isPkgPathRune), "\n") { + return stdout, nil + } + // Treat pkg-config errors as a special case (golang.org/issue/36770). + if strings.HasPrefix(msg, "pkg-config") { + return stdout, nil + } + } + + // This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show + // the error in the Err section of stdout in case -e option is provided. + // This fix is provided for backwards compatibility. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Similar to the previous error, but currently lacks a fix in Go. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must all be in one directory") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Backwards compatibility for Go 1.11 because 1.12 and 1.13 put the directory in the ImportPath. + // If the package doesn't exist, put the absolute path of the directory into the error message, + // as Go 1.13 list does. + const noSuchDirectory = "no such directory" + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), noSuchDirectory) { + errstr := stderr.String() + abspath := strings.TrimSpace(errstr[strings.Index(errstr, noSuchDirectory)+len(noSuchDirectory):]) + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + abspath, strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist. 
+ // Note that the error message we look for in this case is different that the one looked for above. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for #34273. go list -e with GO111MODULE=on has incorrect behavior when listing a + // directory outside any module. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside available modules") { + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + // TODO(matloob): command-line-arguments isn't correct here. + "command-line-arguments", strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Another variation of the previous error + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside module root") { + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + // TODO(matloob): command-line-arguments isn't correct here. + "command-line-arguments", strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit + // status if there's a dependency on a package that doesn't exist. But it should return + // a zero exit status and set an error on that package. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") { + // Don't clobber stdout if `go list` actually returned something. + if len(stdout.String()) > 0 { + return stdout, nil + } + // try to extract package name from string + stderrStr := stderr.String() + var importPath string + colon := strings.Index(stderrStr, ":") + if colon > 0 && strings.HasPrefix(stderrStr, "go build ") { + importPath = stderrStr[len("go build "):colon] + } + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + importPath, strings.Trim(stderrStr, "\n")) + return bytes.NewBufferString(output), nil + } + + // Export mode entails a build. + // If that build fails, errors appear on stderr + // (despite the -e flag) and the Export field is blank. + // Do not fail in that case. + // The same is true if an ad-hoc package given to go list doesn't exist. + // TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when + // packages don't exist or a build fails. + if !usesExportData(cfg) && !containsGoFile(args) { + return nil, friendlyErr + } + } + return stdout, nil +} + +// OverlayJSON is the format overlay files are expected to be in. +// The Replace map maps from overlaid paths to replacement paths: +// the Go command will forward all reads trying to open +// each overlaid path to its replacement path, or consider the overlaid +// path not to exist if the replacement path is empty. +// +// From golang/go#39958. +type OverlayJSON struct { + Replace map[string]string `json:"replace,omitempty"` +} + +// writeOverlays writes out files for go list's -overlay flag, as described +// above. +func (state *golistState) writeOverlays() (filename string, cleanup func(), err error) { + // Do nothing if there are no overlays in the config. 
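// Hedged, in-package sketch of the file that writeOverlays produces for
// `go list -overlay`: keys are the overlaid source paths taken from
// Config.Overlay, values are the temporary files holding the replacement
// contents. All paths here are placeholders.

func exampleOverlayFile() ([]byte, error) {
    // Marshals to:
    // {"replace":{"/home/user/src/example.com/m/gen.go":"/tmp/gopackages-123/0-gen.go"}}
    return json.Marshal(OverlayJSON{
        Replace: map[string]string{
            "/home/user/src/example.com/m/gen.go": "/tmp/gopackages-123/0-gen.go",
        },
    })
}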
+ if len(state.cfg.Overlay) == 0 { + return "", func() {}, nil + } + dir, err := ioutil.TempDir("", "gopackages-*") + if err != nil { + return "", nil, err + } + // The caller must clean up this directory, unless this function returns an + // error. + cleanup = func() { + os.RemoveAll(dir) + } + defer func() { + if err != nil { + cleanup() + } + }() + overlays := map[string]string{} + for k, v := range state.cfg.Overlay { + // Create a unique filename for the overlaid files, to avoid + // creating nested directories. + noSeparator := strings.Join(strings.Split(filepath.ToSlash(k), "/"), "") + f, err := ioutil.TempFile(dir, fmt.Sprintf("*-%s", noSeparator)) + if err != nil { + return "", func() {}, err + } + if _, err := f.Write(v); err != nil { + return "", func() {}, err + } + if err := f.Close(); err != nil { + return "", func() {}, err + } + overlays[k] = f.Name() + } + b, err := json.Marshal(OverlayJSON{Replace: overlays}) + if err != nil { + return "", func() {}, err + } + // Write out the overlay file that contains the filepath mappings. + filename = filepath.Join(dir, "overlay.json") + if err := ioutil.WriteFile(filename, b, 0665); err != nil { + return "", func() {}, err + } + return filename, cleanup, nil +} + +func containsGoFile(s []string) bool { + for _, f := range s { + if strings.HasSuffix(f, ".go") { + return true + } + } + return false +} + +func cmdDebugStr(cmd *exec.Cmd) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.SplitN(kv, "=", 2) + k, v := split[0], split[1] + env[k] = v + } + + var args []string + for _, arg := range cmd.Args { + quoted := strconv.Quote(arg) + if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") { + args = append(args, quoted) + } else { + args = append(args, arg) + } + } + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) +} diff --git a/agent/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/agent/vendor/golang.org/x/tools/go/packages/golist_overlay.go new file mode 100644 index 00000000000..9576b472f9c --- /dev/null +++ b/agent/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -0,0 +1,575 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "encoding/json" + "fmt" + "go/parser" + "go/token" + "os" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + + "golang.org/x/tools/internal/gocommand" +) + +// processGolistOverlay provides rudimentary support for adding +// files that don't exist on disk to an overlay. The results can be +// sometimes incorrect. +// TODO(matloob): Handle unsupported cases, including the following: +// - determining the correct package to add given a new import path +func (state *golistState) processGolistOverlay(response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) { + havePkgs := make(map[string]string) // importPath -> non-test package ID + needPkgsSet := make(map[string]bool) + modifiedPkgsSet := make(map[string]bool) + + pkgOfDir := make(map[string][]*Package) + for _, pkg := range response.dr.Packages { + // This is an approximation of import path to id. This can be + // wrong for tests, vendored packages, and a number of other cases. 
+ havePkgs[pkg.PkgPath] = pkg.ID + dir, err := commonDir(pkg.GoFiles) + if err != nil { + return nil, nil, err + } + if dir != "" { + pkgOfDir[dir] = append(pkgOfDir[dir], pkg) + } + } + + // If no new imports are added, it is safe to avoid loading any needPkgs. + // Otherwise, it's hard to tell which package is actually being loaded + // (due to vendoring) and whether any modified package will show up + // in the transitive set of dependencies (because new imports are added, + // potentially modifying the transitive set of dependencies). + var overlayAddsImports bool + + // If both a package and its test package are created by the overlay, we + // need the real package first. Process all non-test files before test + // files, and make the whole process deterministic while we're at it. + var overlayFiles []string + for opath := range state.cfg.Overlay { + overlayFiles = append(overlayFiles, opath) + } + sort.Slice(overlayFiles, func(i, j int) bool { + iTest := strings.HasSuffix(overlayFiles[i], "_test.go") + jTest := strings.HasSuffix(overlayFiles[j], "_test.go") + if iTest != jTest { + return !iTest // non-tests are before tests. + } + return overlayFiles[i] < overlayFiles[j] + }) + for _, opath := range overlayFiles { + contents := state.cfg.Overlay[opath] + base := filepath.Base(opath) + dir := filepath.Dir(opath) + var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant + var testVariantOf *Package // if opath is a test file, this is the package it is testing + var fileExists bool + isTestFile := strings.HasSuffix(opath, "_test.go") + pkgName, ok := extractPackageName(opath, contents) + if !ok { + // Don't bother adding a file that doesn't even have a parsable package statement + // to the overlay. + continue + } + // If all the overlay files belong to a different package, change the + // package name to that package. + maybeFixPackageName(pkgName, isTestFile, pkgOfDir[dir]) + nextPackage: + for _, p := range response.dr.Packages { + if pkgName != p.Name && p.ID != "command-line-arguments" { + continue + } + for _, f := range p.GoFiles { + if !sameFile(filepath.Dir(f), dir) { + continue + } + // Make sure to capture information on the package's test variant, if needed. + if isTestFile && !hasTestFiles(p) { + // TODO(matloob): Are there packages other than the 'production' variant + // of a package that this can match? This shouldn't match the test main package + // because the file is generated in another directory. + testVariantOf = p + continue nextPackage + } else if !isTestFile && hasTestFiles(p) { + // We're examining a test variant, but the overlaid file is + // a non-test file. Because the overlay implementation + // (currently) only adds a file to one package, skip this + // package, so that we can add the file to the production + // variant of the package. (https://golang.org/issue/36857 + // tracks handling overlays on both the production and test + // variant of a package). + continue nextPackage + } + if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath { + // We have already seen the production version of the + // for which p is a test variant. + if hasTestFiles(p) { + testVariantOf = pkg + } + } + pkg = p + if filepath.Base(f) == base { + fileExists = true + } + } + } + // The overlay could have included an entirely new package or an + // ad-hoc package. An ad-hoc package is one that we have manually + // constructed from inadequate `go list` results for a file= query. + // It will have the ID command-line-arguments. 
+ if pkg == nil || pkg.ID == "command-line-arguments" { + // Try to find the module or gopath dir the file is contained in. + // Then for modules, add the module opath to the beginning. + pkgPath, ok, err := state.getPkgPath(dir) + if err != nil { + return nil, nil, err + } + if !ok { + break + } + var forTest string // only set for x tests + isXTest := strings.HasSuffix(pkgName, "_test") + if isXTest { + forTest = pkgPath + pkgPath += "_test" + } + id := pkgPath + if isTestFile { + if isXTest { + id = fmt.Sprintf("%s [%s.test]", pkgPath, forTest) + } else { + id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath) + } + } + if pkg != nil { + // TODO(rstambler): We should change the package's path and ID + // here. The only issue is that this messes with the roots. + } else { + // Try to reclaim a package with the same ID, if it exists in the response. + for _, p := range response.dr.Packages { + if reclaimPackage(p, id, opath, contents) { + pkg = p + break + } + } + // Otherwise, create a new package. + if pkg == nil { + pkg = &Package{ + PkgPath: pkgPath, + ID: id, + Name: pkgName, + Imports: make(map[string]*Package), + } + response.addPackage(pkg) + havePkgs[pkg.PkgPath] = id + // Add the production package's sources for a test variant. + if isTestFile && !isXTest && testVariantOf != nil { + pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) + // Add the package under test and its imports to the test variant. + pkg.forTest = testVariantOf.PkgPath + for k, v := range testVariantOf.Imports { + pkg.Imports[k] = &Package{ID: v.ID} + } + } + if isXTest { + pkg.forTest = forTest + } + } + } + } + if !fileExists { + pkg.GoFiles = append(pkg.GoFiles, opath) + // TODO(matloob): Adding the file to CompiledGoFiles can exhibit the wrong behavior + // if the file will be ignored due to its build tags. + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, opath) + modifiedPkgsSet[pkg.ID] = true + } + imports, err := extractImports(opath, contents) + if err != nil { + // Let the parser or type checker report errors later. + continue + } + for _, imp := range imports { + // TODO(rstambler): If the package is an x test and the import has + // a test variant, make sure to replace it. + if _, found := pkg.Imports[imp]; found { + continue + } + overlayAddsImports = true + id, ok := havePkgs[imp] + if !ok { + var err error + id, err = state.resolveImport(dir, imp) + if err != nil { + return nil, nil, err + } + } + pkg.Imports[imp] = &Package{ID: id} + // Add dependencies to the non-test variant version of this package as well. + if testVariantOf != nil { + testVariantOf.Imports[imp] = &Package{ID: id} + } + } + } + + // toPkgPath guesses the package path given the id. + toPkgPath := func(sourceDir, id string) (string, error) { + if i := strings.IndexByte(id, ' '); i >= 0 { + return state.resolveImport(sourceDir, id[:i]) + } + return state.resolveImport(sourceDir, id) + } + + // Now that new packages have been created, do another pass to determine + // the new set of missing packages. 
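// Hedged sketch of the user-visible behaviour this overlay pass implements: a
// file that exists only in memory is attached to its package and can even be
// the target of a file= query. The directory argument is a placeholder; it
// must be an existing package directory, and the overlay's package clause
// (here "util") stands in for that directory's real package name.

import (
    "path/filepath"

    "golang.org/x/tools/go/packages"
)

func loadWithOverlay(dir string) ([]*packages.Package, error) {
    extra := filepath.Join(dir, "extra.go") // need not exist on disk
    cfg := &packages.Config{
        Mode: packages.NeedName | packages.NeedFiles | packages.NeedImports,
        Overlay: map[string][]byte{
            extra: []byte("package util\n\nimport \"strings\"\n\nfunc Upper(s string) string { return strings.ToUpper(s) }\n"),
        },
    }
    return packages.Load(cfg, "file="+extra)
}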
+ for _, pkg := range response.dr.Packages { + for _, imp := range pkg.Imports { + if len(pkg.GoFiles) == 0 { + return nil, nil, fmt.Errorf("cannot resolve imports for package %q with no Go files", pkg.PkgPath) + } + pkgPath, err := toPkgPath(filepath.Dir(pkg.GoFiles[0]), imp.ID) + if err != nil { + return nil, nil, err + } + if _, ok := havePkgs[pkgPath]; !ok { + needPkgsSet[pkgPath] = true + } + } + } + + if overlayAddsImports { + needPkgs = make([]string, 0, len(needPkgsSet)) + for pkg := range needPkgsSet { + needPkgs = append(needPkgs, pkg) + } + } + modifiedPkgs = make([]string, 0, len(modifiedPkgsSet)) + for pkg := range modifiedPkgsSet { + modifiedPkgs = append(modifiedPkgs, pkg) + } + return modifiedPkgs, needPkgs, err +} + +// resolveImport finds the ID of a package given its import path. +// In particular, it will find the right vendored copy when in GOPATH mode. +func (state *golistState) resolveImport(sourceDir, importPath string) (string, error) { + env, err := state.getEnv() + if err != nil { + return "", err + } + if env["GOMOD"] != "" { + return importPath, nil + } + + searchDir := sourceDir + for { + vendorDir := filepath.Join(searchDir, "vendor") + exists, ok := state.vendorDirs[vendorDir] + if !ok { + info, err := os.Stat(vendorDir) + exists = err == nil && info.IsDir() + state.vendorDirs[vendorDir] = exists + } + + if exists { + vendoredPath := filepath.Join(vendorDir, importPath) + if info, err := os.Stat(vendoredPath); err == nil && info.IsDir() { + // We should probably check for .go files here, but shame on anyone who fools us. + path, ok, err := state.getPkgPath(vendoredPath) + if err != nil { + return "", err + } + if ok { + return path, nil + } + } + } + + // We know we've hit the top of the filesystem when we Dir / and get /, + // or C:\ and get C:\, etc. + next := filepath.Dir(searchDir) + if next == searchDir { + break + } + searchDir = next + } + return importPath, nil +} + +func hasTestFiles(p *Package) bool { + for _, f := range p.GoFiles { + if strings.HasSuffix(f, "_test.go") { + return true + } + } + return false +} + +// determineRootDirs returns a mapping from absolute directories that could +// contain code to their corresponding import path prefixes. +func (state *golistState) determineRootDirs() (map[string]string, error) { + env, err := state.getEnv() + if err != nil { + return nil, err + } + if env["GOMOD"] != "" { + state.rootsOnce.Do(func() { + state.rootDirs, state.rootDirsError = state.determineRootDirsModules() + }) + } else { + state.rootsOnce.Do(func() { + state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH() + }) + } + return state.rootDirs, state.rootDirsError +} + +func (state *golistState) determineRootDirsModules() (map[string]string, error) { + // List all of the modules--the first will be the directory for the main + // module. Any replaced modules will also need to be treated as roots. + // Editing files in the module cache isn't a great idea, so we don't + // plan to ever support that. + out, err := state.invokeGo("list", "-m", "-json", "all") + if err != nil { + // 'go list all' will fail if we're outside of a module and + // GO111MODULE=on. Try falling back without 'all'. 
+ var innerErr error + out, innerErr = state.invokeGo("list", "-m", "-json") + if innerErr != nil { + return nil, err + } + } + roots := map[string]string{} + modules := map[string]string{} + var i int + for dec := json.NewDecoder(out); dec.More(); { + mod := new(gocommand.ModuleJSON) + if err := dec.Decode(mod); err != nil { + return nil, err + } + if mod.Dir != "" && mod.Path != "" { + // This is a valid module; add it to the map. + absDir, err := filepath.Abs(mod.Dir) + if err != nil { + return nil, err + } + modules[absDir] = mod.Path + // The first result is the main module. + if i == 0 || mod.Replace != nil && mod.Replace.Path != "" { + roots[absDir] = mod.Path + } + } + i++ + } + return roots, nil +} + +func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) { + m := map[string]string{} + for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) { + absDir, err := filepath.Abs(dir) + if err != nil { + return nil, err + } + m[filepath.Join(absDir, "src")] = "" + } + return m, nil +} + +func extractImports(filename string, contents []byte) ([]string, error) { + f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset? + if err != nil { + return nil, err + } + var res []string + for _, imp := range f.Imports { + quotedPath := imp.Path.Value + path, err := strconv.Unquote(quotedPath) + if err != nil { + return nil, err + } + res = append(res, path) + } + return res, nil +} + +// reclaimPackage attempts to reuse a package that failed to load in an overlay. +// +// If the package has errors and has no Name, GoFiles, or Imports, +// then it's possible that it doesn't yet exist on disk. +func reclaimPackage(pkg *Package, id string, filename string, contents []byte) bool { + // TODO(rstambler): Check the message of the actual error? + // It differs between $GOPATH and module mode. + if pkg.ID != id { + return false + } + if len(pkg.Errors) != 1 { + return false + } + if pkg.Name != "" || pkg.ExportFile != "" { + return false + } + if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 { + return false + } + if len(pkg.Imports) > 0 { + return false + } + pkgName, ok := extractPackageName(filename, contents) + if !ok { + return false + } + pkg.Name = pkgName + pkg.Errors = nil + return true +} + +func extractPackageName(filename string, contents []byte) (string, bool) { + // TODO(rstambler): Check the message of the actual error? + // It differs between $GOPATH and module mode. + f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset? + if err != nil { + return "", false + } + return f.Name.Name, true +} + +// commonDir returns the directory that all files are in, "" if files is empty, +// or an error if they aren't in the same directory. +func commonDir(files []string) (string, error) { + seen := make(map[string]bool) + for _, f := range files { + seen[filepath.Dir(f)] = true + } + if len(seen) > 1 { + return "", fmt.Errorf("files (%v) are in more than one directory: %v", files, seen) + } + for k := range seen { + // seen has only one element; return it. + return k, nil + } + return "", nil // no files +} + +// It is possible that the files in the disk directory dir have a different package +// name from newName, which is deduced from the overlays. If they all have a different +// package name, and they all have the same package name, then that name becomes +// the package name. 
+// It returns true if it changes the package name, false otherwise. +func maybeFixPackageName(newName string, isTestFile bool, pkgsOfDir []*Package) { + names := make(map[string]int) + for _, p := range pkgsOfDir { + names[p.Name]++ + } + if len(names) != 1 { + // some files are in different packages + return + } + var oldName string + for k := range names { + oldName = k + } + if newName == oldName { + return + } + // We might have a case where all of the package names in the directory are + // the same, but the overlay file is for an x test, which belongs to its + // own package. If the x test does not yet exist on disk, we may not yet + // have its package name on disk, but we should not rename the packages. + // + // We use a heuristic to determine if this file belongs to an x test: + // The test file should have a package name whose package name has a _test + // suffix or looks like "newName_test". + maybeXTest := strings.HasPrefix(oldName+"_test", newName) || strings.HasSuffix(newName, "_test") + if isTestFile && maybeXTest { + return + } + for _, p := range pkgsOfDir { + p.Name = newName + } +} + +// This function is copy-pasted from +// https://github.com/golang/go/blob/9706f510a5e2754595d716bd64be8375997311fb/src/cmd/go/internal/search/search.go#L360. +// It should be deleted when we remove support for overlays from go/packages. +// +// NOTE: This does not handle any ./... or ./ style queries, as this function +// doesn't know the working directory. +// +// matchPattern(pattern)(name) reports whether +// name matches pattern. Pattern is a limited glob +// pattern in which '...' means 'any string' and there +// is no other special syntax. +// Unfortunately, there are two special cases. Quoting "go help packages": +// +// First, /... at the end of the pattern can match an empty string, +// so that net/... matches both net and packages in its subdirectories, like net/http. +// Second, any slash-separated pattern element containing a wildcard never +// participates in a match of the "vendor" element in the path of a vendored +// package, so that ./... does not match packages in subdirectories of +// ./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do. +// Note, however, that a directory named vendor that itself contains code +// is not a vendored package: cmd/vendor would be a command named vendor, +// and the pattern cmd/... matches it. +func matchPattern(pattern string) func(name string) bool { + // Convert pattern to regular expression. + // The strategy for the trailing /... is to nest it in an explicit ? expression. + // The strategy for the vendor exclusion is to change the unmatchable + // vendor strings to a disallowed code point (vendorChar) and to use + // "(anything but that codepoint)*" as the implementation of the ... wildcard. + // This is a bit complicated but the obvious alternative, + // namely a hand-written search like in most shell glob matchers, + // is too easy to make accidentally exponential. + // Using package regexp guarantees linear-time matching. 
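// In-package, hedged sketch of the glob semantics restated above; the expected
// results follow from the "go help packages" rules quoted in the comment.

func matchPatternExamples() {
    match := matchPattern("net/...")
    fmt.Println(match("net"))          // true: a trailing /... may match the empty string
    fmt.Println(match("net/http"))     // true
    fmt.Println(match("net/vendor/x")) // false: a wildcard never matches a vendor element
}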
+ + const vendorChar = "\x00" + + if strings.Contains(pattern, vendorChar) { + return func(name string) bool { return false } + } + + re := regexp.QuoteMeta(pattern) + re = replaceVendor(re, vendorChar) + switch { + case strings.HasSuffix(re, `/`+vendorChar+`/\.\.\.`): + re = strings.TrimSuffix(re, `/`+vendorChar+`/\.\.\.`) + `(/vendor|/` + vendorChar + `/\.\.\.)` + case re == vendorChar+`/\.\.\.`: + re = `(/vendor|/` + vendorChar + `/\.\.\.)` + case strings.HasSuffix(re, `/\.\.\.`): + re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?` + } + re = strings.ReplaceAll(re, `\.\.\.`, `[^`+vendorChar+`]*`) + + reg := regexp.MustCompile(`^` + re + `$`) + + return func(name string) bool { + if strings.Contains(name, vendorChar) { + return false + } + return reg.MatchString(replaceVendor(name, vendorChar)) + } +} + +// replaceVendor returns the result of replacing +// non-trailing vendor path elements in x with repl. +func replaceVendor(x, repl string) string { + if !strings.Contains(x, "vendor") { + return x + } + elem := strings.Split(x, "/") + for i := 0; i < len(elem)-1; i++ { + if elem[i] == "vendor" { + elem[i] = repl + } + } + return strings.Join(elem, "/") +} diff --git a/agent/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/agent/vendor/golang.org/x/tools/go/packages/loadmode_string.go new file mode 100644 index 00000000000..5c080d21b54 --- /dev/null +++ b/agent/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "fmt" + "strings" +) + +var allModes = []LoadMode{ + NeedName, + NeedFiles, + NeedCompiledGoFiles, + NeedImports, + NeedDeps, + NeedExportFile, + NeedTypes, + NeedSyntax, + NeedTypesInfo, + NeedTypesSizes, +} + +var modeStrings = []string{ + "NeedName", + "NeedFiles", + "NeedCompiledGoFiles", + "NeedImports", + "NeedDeps", + "NeedExportFile", + "NeedTypes", + "NeedSyntax", + "NeedTypesInfo", + "NeedTypesSizes", +} + +func (mod LoadMode) String() string { + m := mod + if m == 0 { + return "LoadMode(0)" + } + var out []string + for i, x := range allModes { + if x > m { + break + } + if (m & x) != 0 { + out = append(out, modeStrings[i]) + m = m ^ x + } + } + if m != 0 { + out = append(out, "Unknown") + } + return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|")) +} diff --git a/agent/vendor/golang.org/x/tools/go/packages/packages.go b/agent/vendor/golang.org/x/tools/go/packages/packages.go new file mode 100644 index 00000000000..da1a27eea62 --- /dev/null +++ b/agent/vendor/golang.org/x/tools/go/packages/packages.go @@ -0,0 +1,1332 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +// See doc.go for package documentation and implementation notes. + +import ( + "context" + "encoding/json" + "fmt" + "go/ast" + "go/parser" + "go/scanner" + "go/token" + "go/types" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/tools/go/gcexportdata" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/packagesinternal" + "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/typesinternal" +) + +// A LoadMode controls the amount of detail to return when loading. 
+// The bits below can be combined to specify which fields should be +// filled in the result packages. +// The zero value is a special case, equivalent to combining +// the NeedName, NeedFiles, and NeedCompiledGoFiles bits. +// ID and Errors (if present) will always be filled. +// Load may return more information than requested. +type LoadMode int + +const ( + // NeedName adds Name and PkgPath. + NeedName LoadMode = 1 << iota + + // NeedFiles adds GoFiles and OtherFiles. + NeedFiles + + // NeedCompiledGoFiles adds CompiledGoFiles. + NeedCompiledGoFiles + + // NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain + // "placeholder" Packages with only the ID set. + NeedImports + + // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. + NeedDeps + + // NeedExportFile adds ExportFile. + NeedExportFile + + // NeedTypes adds Types, Fset, and IllTyped. + NeedTypes + + // NeedSyntax adds Syntax. + NeedSyntax + + // NeedTypesInfo adds TypesInfo. + NeedTypesInfo + + // NeedTypesSizes adds TypesSizes. + NeedTypesSizes + + // needInternalDepsErrors adds the internal deps errors field for use by gopls. + needInternalDepsErrors + + // needInternalForTest adds the internal forTest field. + // Tests must also be set on the context for this field to be populated. + needInternalForTest + + // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. + // Modifies CompiledGoFiles and Types, and has no effect on its own. + typecheckCgo + + // NeedModule adds Module. + NeedModule + + // NeedEmbedFiles adds EmbedFiles. + NeedEmbedFiles + + // NeedEmbedPatterns adds EmbedPatterns. + NeedEmbedPatterns +) + +const ( + // Deprecated: LoadFiles exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles + + // Deprecated: LoadImports exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadImports = LoadFiles | NeedImports + + // Deprecated: LoadTypes exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadTypes = LoadImports | NeedTypes | NeedTypesSizes + + // Deprecated: LoadSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo + + // Deprecated: LoadAllSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadAllSyntax = LoadSyntax | NeedDeps + + // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. + NeedExportsFile = NeedExportFile +) + +// A Config specifies details about how packages should be loaded. +// The zero value is a valid configuration. +// Calls to Load do not modify this struct. +type Config struct { + // Mode controls the level of information returned for each package. + Mode LoadMode + + // Context specifies the context for the load operation. + // If the context is cancelled, the loader may stop early + // and return an ErrCancelled error. + // If Context is nil, the load cannot be cancelled. + Context context.Context + + // Logf is the logger for the config. + // If the user provides a logger, debug logging is enabled. 
+ // If the GOPACKAGESDEBUG environment variable is set to true, + // but the logger is nil, default to log.Printf. + Logf func(format string, args ...interface{}) + + // Dir is the directory in which to run the build system's query tool + // that provides information about the packages. + // If Dir is empty, the tool is run in the current directory. + Dir string + + // Env is the environment to use when invoking the build system's query tool. + // If Env is nil, the current environment is used. + // As in os/exec's Cmd, only the last value in the slice for + // each environment key is used. To specify the setting of only + // a few variables, append to the current environment, as in: + // + // opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386") + // + Env []string + + // gocmdRunner guards go command calls from concurrency errors. + gocmdRunner *gocommand.Runner + + // BuildFlags is a list of command-line flags to be passed through to + // the build system's query tool. + BuildFlags []string + + // modFile will be used for -modfile in go command invocations. + modFile string + + // modFlag will be used for -modfile in go command invocations. + modFlag string + + // Fset provides source position information for syntax trees and types. + // If Fset is nil, Load will use a new fileset, but preserve Fset's value. + Fset *token.FileSet + + // ParseFile is called to read and parse each file + // when preparing a package's type-checked syntax tree. + // It must be safe to call ParseFile simultaneously from multiple goroutines. + // If ParseFile is nil, the loader will uses parser.ParseFile. + // + // ParseFile should parse the source from src and use filename only for + // recording position information. + // + // An application may supply a custom implementation of ParseFile + // to change the effective file contents or the behavior of the parser, + // or to modify the syntax tree. For example, selectively eliminating + // unwanted function bodies can significantly accelerate type checking. + ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) + + // If Tests is set, the loader includes not just the packages + // matching a particular pattern but also any related test packages, + // including test-only variants of the package and the test executable. + // + // For example, when using the go command, loading "fmt" with Tests=true + // returns four packages, with IDs "fmt" (the standard package), + // "fmt [fmt.test]" (the package as compiled for the test), + // "fmt_test" (the test functions from source files in package fmt_test), + // and "fmt.test" (the test binary). + // + // In build systems with explicit names for tests, + // setting Tests may have no effect. + Tests bool + + // Overlay provides a mapping of absolute file paths to file contents. + // If the file with the given path already exists, the parser will use the + // alternative file contents provided by the map. + // + // Overlays provide incomplete support for when a given file doesn't + // already exist on disk. See the package doc above for more details. + Overlay map[string][]byte +} + +// driver is the type for functions that query the build system for the +// packages named by the patterns. +type driver func(cfg *Config, patterns ...string) (*driverResponse, error) + +// driverResponse contains the results for a driver query. +type driverResponse struct { + // NotHandled is returned if the request can't be handled by the current + // driver. 
If an external driver returns a response with NotHandled, the + // rest of the driverResponse is ignored, and go/packages will fallback + // to the next driver. If go/packages is extended in the future to support + // lists of multiple drivers, go/packages will fall back to the next driver. + NotHandled bool + + // Sizes, if not nil, is the types.Sizes to use when type checking. + Sizes *types.StdSizes + + // Roots is the set of package IDs that make up the root packages. + // We have to encode this separately because when we encode a single package + // we cannot know if it is one of the roots as that requires knowledge of the + // graph it is part of. + Roots []string `json:",omitempty"` + + // Packages is the full set of packages in the graph. + // The packages are not connected into a graph. + // The Imports if populated will be stubs that only have their ID set. + // Imports will be connected and then type and syntax information added in a + // later pass (see refine). + Packages []*Package + + // GoVersion is the minor version number used by the driver + // (e.g. the go command on the PATH) when selecting .go files. + // Zero means unknown. + GoVersion int +} + +// Load loads and returns the Go packages named by the given patterns. +// +// Config specifies loading options; +// nil behaves the same as an empty Config. +// +// Load returns an error if any of the patterns was invalid +// as defined by the underlying build system. +// It may return an empty list of packages without an error, +// for instance for an empty expansion of a valid wildcard. +// Errors associated with a particular package are recorded in the +// corresponding Package's Errors list, and do not cause Load to +// return an error. Clients may need to handle such errors before +// proceeding with further analysis. The PrintErrors function is +// provided for convenient display of all errors. +func Load(cfg *Config, patterns ...string) ([]*Package, error) { + l := newLoader(cfg) + response, err := defaultDriver(&l.Config, patterns...) + if err != nil { + return nil, err + } + l.sizes = response.Sizes + return l.refine(response) +} + +// defaultDriver is a driver that implements go/packages' fallback behavior. +// It will try to request to an external driver, if one exists. If there's +// no external driver, or the driver returns a response with NotHandled set, +// defaultDriver will fall back to the go list driver. +func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) { + driver := findExternalDriver(cfg) + if driver == nil { + driver = goListDriver + } + response, err := driver(cfg, patterns...) + if err != nil { + return response, err + } else if response.NotHandled { + return goListDriver(cfg, patterns...) + } + return response, nil +} + +// A Package describes a loaded Go package. +type Package struct { + // ID is a unique identifier for a package, + // in a syntax provided by the underlying build system. + // + // Because the syntax varies based on the build system, + // clients should treat IDs as opaque and not attempt to + // interpret them. + ID string + + // Name is the package name as it appears in the package source code. + Name string + + // PkgPath is the package path as used by the go/types package. + PkgPath string + + // Errors contains any errors encountered querying the metadata + // of the package, or while parsing or type-checking its files. + Errors []Error + + // TypeErrors contains the subset of errors produced during type checking. 
+ TypeErrors []types.Error + + // GoFiles lists the absolute file paths of the package's Go source files. + // It may include files that should not be compiled, for example because + // they contain non-matching build tags, are documentary pseudo-files such as + // unsafe/unsafe.go or builtin/builtin.go, or are subject to cgo preprocessing. + GoFiles []string + + // CompiledGoFiles lists the absolute file paths of the package's source + // files that are suitable for type checking. + // This may differ from GoFiles if files are processed before compilation. + CompiledGoFiles []string + + // OtherFiles lists the absolute file paths of the package's non-Go source files, + // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on. + OtherFiles []string + + // EmbedFiles lists the absolute file paths of the package's files + // embedded with go:embed. + EmbedFiles []string + + // EmbedPatterns lists the absolute file patterns of the package's + // files embedded with go:embed. + EmbedPatterns []string + + // IgnoredFiles lists source files that are not part of the package + // using the current build configuration but that might be part of + // the package using other build configurations. + IgnoredFiles []string + + // ExportFile is the absolute path to a file containing type + // information for the package as provided by the build system. + ExportFile string + + // Imports maps import paths appearing in the package's Go source files + // to corresponding loaded Packages. + Imports map[string]*Package + + // Types provides type information for the package. + // The NeedTypes LoadMode bit sets this field for packages matching the + // patterns; type information for dependencies may be missing or incomplete, + // unless NeedDeps and NeedImports are also set. + Types *types.Package + + // Fset provides position information for Types, TypesInfo, and Syntax. + // It is set only when Types is set. + Fset *token.FileSet + + // IllTyped indicates whether the package or any dependency contains errors. + // It is set only when Types is set. + IllTyped bool + + // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles. + // + // The NeedSyntax LoadMode bit populates this field for packages matching the patterns. + // If NeedDeps and NeedImports are also set, this field will also be populated + // for dependencies. + // + // Syntax is kept in the same order as CompiledGoFiles, with the caveat that nils are + // removed. If parsing returned nil, Syntax may be shorter than CompiledGoFiles. + Syntax []*ast.File + + // TypesInfo provides type information about the package's syntax trees. + // It is set only when Syntax is set. + TypesInfo *types.Info + + // TypesSizes provides the effective size function for types in TypesInfo. + TypesSizes types.Sizes + + // forTest is the package under test, if any. + forTest string + + // depsErrors is the DepsErrors field from the go list response, if any. + depsErrors []*packagesinternal.PackageError + + // module is the module information for the package if it exists. + Module *Module +} + +// Module provides module information for a package. +type Module struct { + Path string // module path + Version string // module version + Replace *Module // replaced by this module + Time *time.Time // time version was created + Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? 
+ Dir string // directory holding files for this module, if any + GoMod string // path to go.mod file used when loading this module, if any + GoVersion string // go version used in module + Error *ModuleError // error loading module +} + +// ModuleError holds errors loading a module. +type ModuleError struct { + Err string // the error itself +} + +func init() { + packagesinternal.GetForTest = func(p interface{}) string { + return p.(*Package).forTest + } + packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { + return p.(*Package).depsErrors + } + packagesinternal.GetGoCmdRunner = func(config interface{}) *gocommand.Runner { + return config.(*Config).gocmdRunner + } + packagesinternal.SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) { + config.(*Config).gocmdRunner = runner + } + packagesinternal.SetModFile = func(config interface{}, value string) { + config.(*Config).modFile = value + } + packagesinternal.SetModFlag = func(config interface{}, value string) { + config.(*Config).modFlag = value + } + packagesinternal.TypecheckCgo = int(typecheckCgo) + packagesinternal.DepsErrors = int(needInternalDepsErrors) + packagesinternal.ForTest = int(needInternalForTest) +} + +// An Error describes a problem with a package's metadata, syntax, or types. +type Error struct { + Pos string // "file:line:col" or "file:line" or "" or "-" + Msg string + Kind ErrorKind +} + +// ErrorKind describes the source of the error, allowing the user to +// differentiate between errors generated by the driver, the parser, or the +// type-checker. +type ErrorKind int + +const ( + UnknownError ErrorKind = iota + ListError + ParseError + TypeError +) + +func (err Error) Error() string { + pos := err.Pos + if pos == "" { + pos = "-" // like token.Position{}.String() + } + return pos + ": " + err.Msg +} + +// flatPackage is the JSON form of Package +// It drops all the type and syntax fields, and transforms the Imports +// +// TODO(adonovan): identify this struct with Package, effectively +// publishing the JSON protocol. +type flatPackage struct { + ID string + Name string `json:",omitempty"` + PkgPath string `json:",omitempty"` + Errors []Error `json:",omitempty"` + GoFiles []string `json:",omitempty"` + CompiledGoFiles []string `json:",omitempty"` + OtherFiles []string `json:",omitempty"` + EmbedFiles []string `json:",omitempty"` + EmbedPatterns []string `json:",omitempty"` + IgnoredFiles []string `json:",omitempty"` + ExportFile string `json:",omitempty"` + Imports map[string]string `json:",omitempty"` +} + +// MarshalJSON returns the Package in its JSON form. +// For the most part, the structure fields are written out unmodified, and +// the type and syntax fields are skipped. +// The imports are written out as just a map of path to package id. +// The errors are written using a custom type that tries to preserve the +// structure of error types we know about. +// +// This method exists to enable support for additional build systems. It is +// not intended for use by clients of the API and we may change the format. 
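For illustration, a minimal sketch (not part of the vendored file) of the flat form described above; the package path "example.com/m/a" and its single import of "fmt" are hypothetical values chosen only for the example.

package main

import (
	"encoding/json"
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	p := &packages.Package{
		ID:      "example.com/m/a",
		Name:    "a",
		PkgPath: "example.com/m/a",
		Imports: map[string]*packages.Package{"fmt": {ID: "fmt"}},
	}
	// MarshalJSON (below) drops the type and syntax fields and flattens
	// Imports to a path -> ID map, printing roughly:
	// {"ID":"example.com/m/a","Name":"a","PkgPath":"example.com/m/a","Imports":{"fmt":"fmt"}}
	b, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}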
+func (p *Package) MarshalJSON() ([]byte, error) { + flat := &flatPackage{ + ID: p.ID, + Name: p.Name, + PkgPath: p.PkgPath, + Errors: p.Errors, + GoFiles: p.GoFiles, + CompiledGoFiles: p.CompiledGoFiles, + OtherFiles: p.OtherFiles, + EmbedFiles: p.EmbedFiles, + EmbedPatterns: p.EmbedPatterns, + IgnoredFiles: p.IgnoredFiles, + ExportFile: p.ExportFile, + } + if len(p.Imports) > 0 { + flat.Imports = make(map[string]string, len(p.Imports)) + for path, ipkg := range p.Imports { + flat.Imports[path] = ipkg.ID + } + } + return json.Marshal(flat) +} + +// UnmarshalJSON reads in a Package from its JSON format. +// See MarshalJSON for details about the format accepted. +func (p *Package) UnmarshalJSON(b []byte) error { + flat := &flatPackage{} + if err := json.Unmarshal(b, &flat); err != nil { + return err + } + *p = Package{ + ID: flat.ID, + Name: flat.Name, + PkgPath: flat.PkgPath, + Errors: flat.Errors, + GoFiles: flat.GoFiles, + CompiledGoFiles: flat.CompiledGoFiles, + OtherFiles: flat.OtherFiles, + EmbedFiles: flat.EmbedFiles, + EmbedPatterns: flat.EmbedPatterns, + ExportFile: flat.ExportFile, + } + if len(flat.Imports) > 0 { + p.Imports = make(map[string]*Package, len(flat.Imports)) + for path, id := range flat.Imports { + p.Imports[path] = &Package{ID: id} + } + } + return nil +} + +func (p *Package) String() string { return p.ID } + +// loaderPackage augments Package with state used during the loading phase +type loaderPackage struct { + *Package + importErrors map[string]error // maps each bad import to its error + loadOnce sync.Once + color uint8 // for cycle detection + needsrc bool // load from source (Mode >= LoadTypes) + needtypes bool // type information is either requested or depended on + initial bool // package was matched by a pattern + goVersion int // minor version number of go command on PATH +} + +// loader holds the working state of a single call to load. +type loader struct { + pkgs map[string]*loaderPackage + Config + sizes types.Sizes + parseCache map[string]*parseValue + parseCacheMu sync.Mutex + exportMu sync.Mutex // enforces mutual exclusion of exportdata operations + + // Config.Mode contains the implied mode (see impliedLoadMode). + // Implied mode contains all the fields we need the data for. + // In requestedMode there are the actually requested fields. + // We'll zero them out before returning packages to the user. + // This makes it easier for us to get the conditions where + // we need certain modes right. + requestedMode LoadMode +} + +type parseValue struct { + f *ast.File + err error + ready chan struct{} +} + +func newLoader(cfg *Config) *loader { + ld := &loader{ + parseCache: map[string]*parseValue{}, + } + if cfg != nil { + ld.Config = *cfg + // If the user has provided a logger, use it. + ld.Config.Logf = cfg.Logf + } + if ld.Config.Logf == nil { + // If the GOPACKAGESDEBUG environment variable is set to true, + // but the user has not provided a logger, default to log.Printf. + if debug { + ld.Config.Logf = log.Printf + } else { + ld.Config.Logf = func(format string, args ...interface{}) {} + } + } + if ld.Config.Mode == 0 { + ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility. 
+ } + if ld.Config.Env == nil { + ld.Config.Env = os.Environ() + } + if ld.Config.gocmdRunner == nil { + ld.Config.gocmdRunner = &gocommand.Runner{} + } + if ld.Context == nil { + ld.Context = context.Background() + } + if ld.Dir == "" { + if dir, err := os.Getwd(); err == nil { + ld.Dir = dir + } + } + + // Save the actually requested fields. We'll zero them out before returning packages to the user. + ld.requestedMode = ld.Mode + ld.Mode = impliedLoadMode(ld.Mode) + + if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + if ld.Fset == nil { + ld.Fset = token.NewFileSet() + } + + // ParseFile is required even in LoadTypes mode + // because we load source if export data is missing. + if ld.ParseFile == nil { + ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) { + const mode = parser.AllErrors | parser.ParseComments + return parser.ParseFile(fset, filename, src, mode) + } + } + } + + return ld +} + +// refine connects the supplied packages into a graph and then adds type +// and syntax information as requested by the LoadMode. +func (ld *loader) refine(response *driverResponse) ([]*Package, error) { + roots := response.Roots + rootMap := make(map[string]int, len(roots)) + for i, root := range roots { + rootMap[root] = i + } + ld.pkgs = make(map[string]*loaderPackage) + // first pass, fixup and build the map and roots + var initial = make([]*loaderPackage, len(roots)) + for _, pkg := range response.Packages { + rootIndex := -1 + if i, found := rootMap[pkg.ID]; found { + rootIndex = i + } + + // Overlays can invalidate export data. + // TODO(matloob): make this check fine-grained based on dependencies on overlaid files + exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe" + // This package needs type information if the caller requested types and the package is + // either a root, or it's a non-root and the user requested dependencies ... + needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) + // This package needs source if the call requested source (or types info, which implies source) + // and the package is either a root, or itas a non- root and the user requested dependencies... + needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) || + // ... or if we need types and the exportData is invalid. We fall back to (incompletely) + // typechecking packages from source if they fail to compile. + (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe" + lpkg := &loaderPackage{ + Package: pkg, + needtypes: needtypes, + needsrc: needsrc, + goVersion: response.GoVersion, + } + ld.pkgs[lpkg.ID] = lpkg + if rootIndex >= 0 { + initial[rootIndex] = lpkg + lpkg.initial = true + } + } + for i, root := range roots { + if initial[i] == nil { + return nil, fmt.Errorf("root package %v is missing", root) + } + } + + // Materialize the import graph. + + const ( + white = 0 // new + grey = 1 // in progress + black = 2 // complete + ) + + // visit traverses the import graph, depth-first, + // and materializes the graph as Packages.Imports. + // + // Valid imports are saved in the Packages.Import map. + // Invalid imports (cycles and missing nodes) are saved in the importErrors map. + // Thus, even in the presence of both kinds of errors, the Import graph remains a DAG. + // + // visit returns whether the package needs src or has a transitive + // dependency on a package that does. 
These are the only packages + // for which we load source code. + var stack []*loaderPackage + var visit func(lpkg *loaderPackage) bool + var srcPkgs []*loaderPackage + visit = func(lpkg *loaderPackage) bool { + switch lpkg.color { + case black: + return lpkg.needsrc + case grey: + panic("internal error: grey node") + } + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports + // If NeedImports isn't set, the imports fields will all be zeroed out. + if ld.Mode&NeedImports != 0 { + lpkg.Imports = make(map[string]*Package, len(stubs)) + for importPath, ipkg := range stubs { + var importErr error + imp := ld.pkgs[ipkg.ID] + if imp == nil { + // (includes package "C" when DisableCgo) + importErr = fmt.Errorf("missing package: %q", ipkg.ID) + } else if imp.color == grey { + importErr = fmt.Errorf("import cycle: %s", stack) + } + if importErr != nil { + if lpkg.importErrors == nil { + lpkg.importErrors = make(map[string]error) + } + lpkg.importErrors[importPath] = importErr + continue + } + + if visit(imp) { + lpkg.needsrc = true + } + lpkg.Imports[importPath] = imp.Package + } + } + if lpkg.needsrc { + srcPkgs = append(srcPkgs, lpkg) + } + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes + } + stack = stack[:len(stack)-1] // pop + lpkg.color = black + + return lpkg.needsrc + } + + if ld.Mode&NeedImports == 0 { + // We do this to drop the stub import packages that we are not even going to try to resolve. + for _, lpkg := range initial { + lpkg.Imports = nil + } + } else { + // For each initial package, create its import DAG. + for _, lpkg := range initial { + visit(lpkg) + } + } + if ld.Mode&NeedImports != 0 && ld.Mode&NeedTypes != 0 { + for _, lpkg := range srcPkgs { + // Complete type information is required for the + // immediate dependencies of each source package. + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + imp.needtypes = true + } + } + } + // Load type data and syntax if needed, starting at + // the initial packages (roots of the import DAG). + if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + var wg sync.WaitGroup + for _, lpkg := range initial { + wg.Add(1) + go func(lpkg *loaderPackage) { + ld.loadRecursive(lpkg) + wg.Done() + }(lpkg) + } + wg.Wait() + } + + result := make([]*Package, len(initial)) + for i, lpkg := range initial { + result[i] = lpkg.Package + } + for i := range ld.pkgs { + // Clear all unrequested fields, + // to catch programs that use more than they request. 
+ if ld.requestedMode&NeedName == 0 { + ld.pkgs[i].Name = "" + ld.pkgs[i].PkgPath = "" + } + if ld.requestedMode&NeedFiles == 0 { + ld.pkgs[i].GoFiles = nil + ld.pkgs[i].OtherFiles = nil + ld.pkgs[i].IgnoredFiles = nil + } + if ld.requestedMode&NeedEmbedFiles == 0 { + ld.pkgs[i].EmbedFiles = nil + } + if ld.requestedMode&NeedEmbedPatterns == 0 { + ld.pkgs[i].EmbedPatterns = nil + } + if ld.requestedMode&NeedCompiledGoFiles == 0 { + ld.pkgs[i].CompiledGoFiles = nil + } + if ld.requestedMode&NeedImports == 0 { + ld.pkgs[i].Imports = nil + } + if ld.requestedMode&NeedExportFile == 0 { + ld.pkgs[i].ExportFile = "" + } + if ld.requestedMode&NeedTypes == 0 { + ld.pkgs[i].Types = nil + ld.pkgs[i].Fset = nil + ld.pkgs[i].IllTyped = false + } + if ld.requestedMode&NeedSyntax == 0 { + ld.pkgs[i].Syntax = nil + } + if ld.requestedMode&NeedTypesInfo == 0 { + ld.pkgs[i].TypesInfo = nil + } + if ld.requestedMode&NeedTypesSizes == 0 { + ld.pkgs[i].TypesSizes = nil + } + if ld.requestedMode&NeedModule == 0 { + ld.pkgs[i].Module = nil + } + } + + return result, nil +} + +// loadRecursive loads the specified package and its dependencies, +// recursively, in parallel, in topological order. +// It is atomic and idempotent. +// Precondition: ld.Mode&NeedTypes. +func (ld *loader) loadRecursive(lpkg *loaderPackage) { + lpkg.loadOnce.Do(func() { + // Load the direct dependencies, in parallel. + var wg sync.WaitGroup + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + wg.Add(1) + go func(imp *loaderPackage) { + ld.loadRecursive(imp) + wg.Done() + }(imp) + } + wg.Wait() + ld.loadPackage(lpkg) + }) +} + +// loadPackage loads the specified package. +// It must be called only once per Package, +// after immediate dependencies are loaded. +// Precondition: ld.Mode & NeedTypes. +func (ld *loader) loadPackage(lpkg *loaderPackage) { + if lpkg.PkgPath == "unsafe" { + // Fill in the blanks to avoid surprises. + lpkg.Types = types.Unsafe + lpkg.Fset = ld.Fset + lpkg.Syntax = []*ast.File{} + lpkg.TypesInfo = new(types.Info) + lpkg.TypesSizes = ld.sizes + return + } + + // Call NewPackage directly with explicit name. + // This avoids skew between golist and go/types when the files' + // package declarations are inconsistent. + lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name) + lpkg.Fset = ld.Fset + + // Subtle: we populate all Types fields with an empty Package + // before loading export data so that export data processing + // never has to create a types.Package for an indirect dependency, + // which would then require that such created packages be explicitly + // inserted back into the Import graph as a final step after export data loading. + // (Hence this return is after the Types assignment.) + // The Diamond test exercises this case. + if !lpkg.needtypes && !lpkg.needsrc { + return + } + if !lpkg.needsrc { + if err := ld.loadFromExportData(lpkg); err != nil { + lpkg.Errors = append(lpkg.Errors, Error{ + Pos: "-", + Msg: err.Error(), + Kind: UnknownError, // e.g. can't find/open/parse export data + }) + } + return // not a source package, don't get syntax trees + } + + appendError := func(err error) { + // Convert various error types into the one true Error. 
+ var errs []Error + switch err := err.(type) { + case Error: + // from driver + errs = append(errs, err) + + case *os.PathError: + // from parser + errs = append(errs, Error{ + Pos: err.Path + ":1", + Msg: err.Err.Error(), + Kind: ParseError, + }) + + case scanner.ErrorList: + // from parser + for _, err := range err { + errs = append(errs, Error{ + Pos: err.Pos.String(), + Msg: err.Msg, + Kind: ParseError, + }) + } + + case types.Error: + // from type checker + lpkg.TypeErrors = append(lpkg.TypeErrors, err) + errs = append(errs, Error{ + Pos: err.Fset.Position(err.Pos).String(), + Msg: err.Msg, + Kind: TypeError, + }) + + default: + // unexpected impoverished error from parser? + errs = append(errs, Error{ + Pos: "-", + Msg: err.Error(), + Kind: UnknownError, + }) + + // If you see this error message, please file a bug. + log.Printf("internal error: error %q (%T) without position", err, err) + } + + lpkg.Errors = append(lpkg.Errors, errs...) + } + + // If the go command on the PATH is newer than the runtime, + // then the go/{scanner,ast,parser,types} packages from the + // standard library may be unable to process the files + // selected by go list. + // + // There is currently no way to downgrade the effective + // version of the go command (see issue 52078), so we proceed + // with the newer go command but, in case of parse or type + // errors, we emit an additional diagnostic. + // + // See: + // - golang.org/issue/52078 (flag to set release tags) + // - golang.org/issue/50825 (gopls legacy version support) + // - golang.org/issue/55883 (go/packages confusing error) + // + // Should we assert a hard minimum of (currently) go1.16 here? + var runtimeVersion int + if _, err := fmt.Sscanf(runtime.Version(), "go1.%d", &runtimeVersion); err == nil && runtimeVersion < lpkg.goVersion { + defer func() { + if len(lpkg.Errors) > 0 { + appendError(Error{ + Pos: "-", + Msg: fmt.Sprintf("This application uses version go1.%d of the source-processing packages but runs version go1.%d of 'go list'. It may fail to process source files that rely on newer language features. If so, rebuild the application using a newer version of Go.", runtimeVersion, lpkg.goVersion), + Kind: UnknownError, + }) + } + }() + } + + if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" { + // The config requested loading sources and types, but sources are missing. + // Add an error to the package and fall back to loading from export data. + appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError}) + _ = ld.loadFromExportData(lpkg) // ignore any secondary errors + + return // can't get syntax trees for this package + } + + files, errs := ld.parseFiles(lpkg.CompiledGoFiles) + for _, err := range errs { + appendError(err) + } + + lpkg.Syntax = files + if ld.Config.Mode&NeedTypes == 0 { + return + } + + lpkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + typeparams.InitInstanceInfo(lpkg.TypesInfo) + lpkg.TypesSizes = ld.sizes + + importer := importerFunc(func(path string) (*types.Package, error) { + if path == "unsafe" { + return types.Unsafe, nil + } + + // The imports map is keyed by import path. 
+ ipkg := lpkg.Imports[path] + if ipkg == nil { + if err := lpkg.importErrors[path]; err != nil { + return nil, err + } + // There was skew between the metadata and the + // import declarations, likely due to an edit + // race, or because the ParseFile feature was + // used to supply alternative file contents. + return nil, fmt.Errorf("no metadata for %s", path) + } + + if ipkg.Types != nil && ipkg.Types.Complete() { + return ipkg.Types, nil + } + log.Fatalf("internal error: package %q without types was imported from %q", path, lpkg) + panic("unreachable") + }) + + // type-check + tc := &types.Config{ + Importer: importer, + + // Type-check bodies of functions only in initial packages. + // Example: for import graph A->B->C and initial packages {A,C}, + // we can ignore function bodies in B. + IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial, + + Error: appendError, + Sizes: ld.sizes, + } + if lpkg.Module != nil && lpkg.Module.GoVersion != "" { + typesinternal.SetGoVersion(tc, "go"+lpkg.Module.GoVersion) + } + if (ld.Mode & typecheckCgo) != 0 { + if !typesinternal.SetUsesCgo(tc) { + appendError(Error{ + Msg: "typecheckCgo requires Go 1.15+", + Kind: ListError, + }) + return + } + } + types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) + + lpkg.importErrors = nil // no longer needed + + // If !Cgo, the type-checker uses FakeImportC mode, so + // it doesn't invoke the importer for import "C", + // nor report an error for the import, + // or for any undefined C.f reference. + // We must detect this explicitly and correctly + // mark the package as IllTyped (by reporting an error). + // TODO(adonovan): if these errors are annoying, + // we could just set IllTyped quietly. + if tc.FakeImportC { + outer: + for _, f := range lpkg.Syntax { + for _, imp := range f.Imports { + if imp.Path.Value == `"C"` { + err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`} + appendError(err) + break outer + } + } + } + } + + // Record accumulated errors. + illTyped := len(lpkg.Errors) > 0 + if !illTyped { + for _, imp := range lpkg.Imports { + if imp.IllTyped { + illTyped = true + break + } + } + } + lpkg.IllTyped = illTyped +} + +// An importFunc is an implementation of the single-method +// types.Importer interface based on a function value. +type importerFunc func(path string) (*types.Package, error) + +func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } + +// We use a counting semaphore to limit +// the number of parallel I/O calls per process. +var ioLimit = make(chan bool, 20) + +func (ld *loader) parseFile(filename string) (*ast.File, error) { + ld.parseCacheMu.Lock() + v, ok := ld.parseCache[filename] + if ok { + // cache hit + ld.parseCacheMu.Unlock() + <-v.ready + } else { + // cache miss + v = &parseValue{ready: make(chan struct{})} + ld.parseCache[filename] = v + ld.parseCacheMu.Unlock() + + var src []byte + for f, contents := range ld.Config.Overlay { + if sameFile(f, filename) { + src = contents + } + } + var err error + if src == nil { + ioLimit <- true // wait + src, err = ioutil.ReadFile(filename) + <-ioLimit // signal + } + if err != nil { + v.err = err + } else { + v.f, v.err = ld.ParseFile(ld.Fset, filename, src) + } + + close(v.ready) + } + return v.f, v.err +} + +// parseFiles reads and parses the Go source files and returns the ASTs +// of the ones that could be at least partially parsed, along with a +// list of I/O and parse errors encountered. 
+// +// Because files are scanned in parallel, the token.Pos +// positions of the resulting ast.Files are not ordered. +func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { + var wg sync.WaitGroup + n := len(filenames) + parsed := make([]*ast.File, n) + errors := make([]error, n) + for i, file := range filenames { + if ld.Config.Context.Err() != nil { + parsed[i] = nil + errors[i] = ld.Config.Context.Err() + continue + } + wg.Add(1) + go func(i int, filename string) { + parsed[i], errors[i] = ld.parseFile(filename) + wg.Done() + }(i, file) + } + wg.Wait() + + // Eliminate nils, preserving order. + var o int + for _, f := range parsed { + if f != nil { + parsed[o] = f + o++ + } + } + parsed = parsed[:o] + + o = 0 + for _, err := range errors { + if err != nil { + errors[o] = err + o++ + } + } + errors = errors[:o] + + return parsed, errors +} + +// sameFile returns true if x and y have the same basename and denote +// the same file. +func sameFile(x, y string) bool { + if x == y { + // It could be the case that y doesn't exist. + // For instance, it may be an overlay file that + // hasn't been written to disk. To handle that case + // let x == y through. (We added the exact absolute path + // string to the CompiledGoFiles list, so the unwritten + // overlay case implies x==y.) + return true + } + if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation) + if xi, err := os.Stat(x); err == nil { + if yi, err := os.Stat(y); err == nil { + return os.SameFile(xi, yi) + } + } + } + return false +} + +// loadFromExportData ensures that type information is present for the specified +// package, loading it from an export data file on the first request. +// On success it sets lpkg.Types to a new Package. +func (ld *loader) loadFromExportData(lpkg *loaderPackage) error { + if lpkg.PkgPath == "" { + log.Fatalf("internal error: Package %s has no PkgPath", lpkg) + } + + // Because gcexportdata.Read has the potential to create or + // modify the types.Package for each node in the transitive + // closure of dependencies of lpkg, all exportdata operations + // must be sequential. (Finer-grained locking would require + // changes to the gcexportdata API.) + // + // The exportMu lock guards the lpkg.Types field and the + // types.Package it points to, for each loaderPackage in the graph. + // + // Not all accesses to Package.Pkg need to be protected by exportMu: + // graph ordering ensures that direct dependencies of source + // packages are fully loaded before the importer reads their Pkg field. + ld.exportMu.Lock() + defer ld.exportMu.Unlock() + + if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() { + return nil // cache hit + } + + lpkg.IllTyped = true // fail safe + + if lpkg.ExportFile == "" { + // Errors while building export data will have been printed to stderr. + return fmt.Errorf("no export data file") + } + f, err := os.Open(lpkg.ExportFile) + if err != nil { + return err + } + defer f.Close() + + // Read gc export data. + // + // We don't currently support gccgo export data because all + // underlying workspaces use the gc toolchain. (Even build + // systems that support gccgo don't use it for workspace + // queries.) + r, err := gcexportdata.NewReader(f) + if err != nil { + return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + + // Build the view. + // + // The gcexportdata machinery has no concept of package ID. 
+ // It identifies packages by their PkgPath, which although not + // globally unique is unique within the scope of one invocation + // of the linker, type-checker, or gcexportdata. + // + // So, we must build a PkgPath-keyed view of the global + // (conceptually ID-keyed) cache of packages and pass it to + // gcexportdata. The view must contain every existing + // package that might possibly be mentioned by the + // current package---its transitive closure. + // + // In loadPackage, we unconditionally create a types.Package for + // each dependency so that export data loading does not + // create new ones. + // + // TODO(adonovan): it would be simpler and more efficient + // if the export data machinery invoked a callback to + // get-or-create a package instead of a map. + // + view := make(map[string]*types.Package) // view seen by gcexportdata + seen := make(map[*loaderPackage]bool) // all visited packages + var visit func(pkgs map[string]*Package) + visit = func(pkgs map[string]*Package) { + for _, p := range pkgs { + lpkg := ld.pkgs[p.ID] + if !seen[lpkg] { + seen[lpkg] = true + view[lpkg.PkgPath] = lpkg.Types + visit(lpkg.Imports) + } + } + } + visit(lpkg.Imports) + + viewLen := len(view) + 1 // adding the self package + // Parse the export data. + // (May modify incomplete packages in view but not create new ones.) + tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath) + if err != nil { + return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + if _, ok := view["go.shape"]; ok { + // Account for the pseudopackage "go.shape" that gets + // created by generic code. + viewLen++ + } + if viewLen != len(view) { + log.Panicf("golang.org/x/tools/go/packages: unexpected new packages during load of %s", lpkg.PkgPath) + } + + lpkg.Types = tpkg + lpkg.IllTyped = false + return nil +} + +// impliedLoadMode returns loadMode with its dependencies. +func impliedLoadMode(loadMode LoadMode) LoadMode { + if loadMode&(NeedDeps|NeedTypes|NeedTypesInfo) != 0 { + // All these things require knowing the import graph. + loadMode |= NeedImports + } + + return loadMode +} + +func usesExportData(cfg *Config) bool { + return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 +} + +var _ interface{} = io.Discard // assert build toolchain is go1.16 or later diff --git a/agent/vendor/golang.org/x/tools/go/packages/visit.go b/agent/vendor/golang.org/x/tools/go/packages/visit.go new file mode 100644 index 00000000000..a1dcc40b727 --- /dev/null +++ b/agent/vendor/golang.org/x/tools/go/packages/visit.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "fmt" + "os" + "sort" +) + +// Visit visits all the packages in the import graph whose roots are +// pkgs, calling the optional pre function the first time each package +// is encountered (preorder), and the optional post function after a +// package's dependencies have been visited (postorder). +// The boolean result of pre(pkg) determines whether +// the imports of package pkg are visited. 
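As a usage sketch (not part of the vendored file; the pattern "./..." is a hypothetical stand-in for the caller's own packages), a typical client combines Load and the Config documented in packages.go with the Visit and PrintErrors helpers defined below:

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedImports | packages.NeedDeps}
	pkgs, err := packages.Load(cfg, "./...") // hypothetical pattern
	if err != nil {
		log.Fatal(err) // e.g. an invalid pattern; per-package problems land in pkg.Errors instead
	}
	if packages.PrintErrors(pkgs) > 0 {
		log.Fatal("packages contain errors")
	}
	// Visit calls the post function in postorder, so each dependency is
	// printed before the packages that import it.
	packages.Visit(pkgs, nil, func(p *packages.Package) {
		fmt.Println(p.ID)
	})
}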
+func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { + seen := make(map[*Package]bool) + var visit func(*Package) + visit = func(pkg *Package) { + if !seen[pkg] { + seen[pkg] = true + + if pre == nil || pre(pkg) { + paths := make([]string, 0, len(pkg.Imports)) + for path := range pkg.Imports { + paths = append(paths, path) + } + sort.Strings(paths) // Imports is a map, this makes visit stable + for _, path := range paths { + visit(pkg.Imports[path]) + } + } + + if post != nil { + post(pkg) + } + } + } + for _, pkg := range pkgs { + visit(pkg) + } +} + +// PrintErrors prints to os.Stderr the accumulated errors of all +// packages in the import graph rooted at pkgs, dependencies first. +// PrintErrors returns the number of errors printed. +func PrintErrors(pkgs []*Package) int { + var n int + Visit(pkgs, nil, func(pkg *Package) { + for _, err := range pkg.Errors { + fmt.Fprintln(os.Stderr, err) + n++ + } + }) + return n +} diff --git a/agent/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/agent/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go new file mode 100644 index 00000000000..c725d839ba1 --- /dev/null +++ b/agent/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -0,0 +1,824 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package objectpath defines a naming scheme for types.Objects +// (that is, named entities in Go programs) relative to their enclosing +// package. +// +// Type-checker objects are canonical, so they are usually identified by +// their address in memory (a pointer), but a pointer has meaning only +// within one address space. By contrast, objectpath names allow the +// identity of an object to be sent from one program to another, +// establishing a correspondence between types.Object variables that are +// distinct but logically equivalent. +// +// A single object may have multiple paths. In this example, +// +// type A struct{ X int } +// type B A +// +// the field X has two paths due to its membership of both A and B. +// The For(obj) function always returns one of these paths, arbitrarily +// but consistently. +package objectpath + +import ( + "fmt" + "go/types" + "sort" + "strconv" + "strings" + _ "unsafe" + + "golang.org/x/tools/internal/typeparams" +) + +// A Path is an opaque name that identifies a types.Object +// relative to its package. Conceptually, the name consists of a +// sequence of destructuring operations applied to the package scope +// to obtain the original object. +// The name does not include the package itself. +type Path string + +// Encoding +// +// An object path is a textual and (with training) human-readable encoding +// of a sequence of destructuring operators, starting from a types.Package. +// The sequences represent a path through the package/object/type graph. +// We classify these operators by their type: +// +// PO package->object Package.Scope.Lookup +// OT object->type Object.Type +// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU] +// TO type->object Type.{At,Field,Method,Obj} [AFMO] +// +// All valid paths start with a package and end at an object +// and thus may be defined by the regular language: +// +// objectpath = PO (OT TT* TO)* +// +// The concrete encoding follows directly: +// - The only PO operator is Package.Scope.Lookup, which requires an identifier. +// - The only OT operator is Object.Type, +// which we encode as '.' 
because dot cannot appear in an identifier. +// - The TT operators are encoded as [EKPRUTC]; +// one of these (TypeParam) requires an integer operand, +// which is encoded as a string of decimal digits. +// - The TO operators are encoded as [AFMO]; +// three of these (At,Field,Method) require an integer operand, +// which is encoded as a string of decimal digits. +// These indices are stable across different representations +// of the same package, even source and export data. +// The indices used are implementation specific and may not correspond to +// the argument to the go/types function. +// +// In the example below, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// field X has the path "T.UM0.RA1.F0", +// representing the following sequence of operations: +// +// p.Lookup("T") T +// .Type().Underlying().Method(0). f +// .Type().Results().At(1) b +// .Type().Field(0) X +// +// The encoding is not maximally compact---every R or P is +// followed by an A, for example---but this simplifies the +// encoder and decoder. +const ( + // object->type operators + opType = '.' // .Type() (Object) + + // type->type operators + opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) + opKey = 'K' // .Key() (Map) + opParams = 'P' // .Params() (Signature) + opResults = 'R' // .Results() (Signature) + opUnderlying = 'U' // .Underlying() (Named) + opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) + opConstraint = 'C' // .Constraint() (TypeParam) + + // type->object operators + opAt = 'A' // .At(i) (Tuple) + opField = 'F' // .Field(i) (Struct) + opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) + opObj = 'O' // .Obj() (Named, TypeParam) +) + +// For is equivalent to new(Encoder).For(obj). +// +// It may be more efficient to reuse a single Encoder across several calls. +func For(obj types.Object) (Path, error) { + return new(Encoder).For(obj) +} + +// An Encoder amortizes the cost of encoding the paths of multiple objects. +// The zero value of an Encoder is ready to use. +type Encoder struct { + scopeMemo map[*types.Scope][]types.Object // memoization of scopeObjects + namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods() + skipMethodSorting bool +} + +// Exposed to gopls via golang.org/x/tools/internal/typesinternal +// TODO(golang/go#61443): eliminate this parameter one way or the other. +// +//go:linkname skipMethodSorting +func skipMethodSorting(enc *Encoder) { + enc.skipMethodSorting = true +} + +// For returns the path to an object relative to its package, +// or an error if the object is not accessible from the package's Scope. +// +// The For function guarantees to return a path only for the following objects: +// - package-level types +// - exported package-level non-types +// - methods +// - parameter and result variables +// - struct fields +// These objects are sufficient to define the API of their package. +// The objects described by a package's export data are drawn from this set. +// +// The set of objects accessible from a package's Scope depends on +// whether the package was produced by type-checking syntax, or +// reading export data; the latter may have a smaller Scope since +// export data trims objects that are not reachable from an exported +// declaration. 
For example, the For function will return a path for +// an exported method of an unexported type that is not reachable +// from any public declaration; this path will cause the Object +// function to fail if called on a package loaded from export data. +// TODO(adonovan): is this a bug or feature? Should this package +// compute accessibility in the same way? +// +// For does not return a path for predeclared names, imported package +// names, local names, and unexported package-level names (except +// types). +// +// Example: given this definition, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// For(X) would return a path that denotes the following sequence of operations: +// +// p.Scope().Lookup("T") (TypeName T) +// .Type().Underlying().Method(0). (method Func f) +// .Type().Results().At(1) (field Var b) +// .Type().Field(0) (field Var X) +// +// where p is the package (*types.Package) to which X belongs. +func (enc *Encoder) For(obj types.Object) (Path, error) { + pkg := obj.Pkg() + + // This table lists the cases of interest. + // + // Object Action + // ------ ------ + // nil reject + // builtin reject + // pkgname reject + // label reject + // var + // package-level accept + // func param/result accept + // local reject + // struct field accept + // const + // package-level accept + // local reject + // func + // package-level accept + // init functions reject + // concrete method accept + // interface method accept + // type + // package-level accept + // local reject + // + // The only accessible package-level objects are members of pkg itself. + // + // The cases are handled in four steps: + // + // 1. reject nil and builtin + // 2. accept package-level objects + // 3. reject obviously invalid objects + // 4. search the API for the path to the param/result/field/method. + + // 1. reference to nil or builtin? + if pkg == nil { + return "", fmt.Errorf("predeclared %s has no path", obj) + } + scope := pkg.Scope() + + // 2. package-level object? + if scope.Lookup(obj.Name()) == obj { + // Only exported objects (and non-exported types) have a path. + // Non-exported types may be referenced by other objects. + if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() { + return "", fmt.Errorf("no path for non-exported %v", obj) + } + return Path(obj.Name()), nil + } + + // 3. Not a package-level object. + // Reject obviously non-viable cases. + switch obj := obj.(type) { + case *types.TypeName: + if _, ok := obj.Type().(*typeparams.TypeParam); !ok { + // With the exception of type parameters, only package-level type names + // have a path. + return "", fmt.Errorf("no path for %v", obj) + } + case *types.Const, // Only package-level constants have a path. + *types.Label, // Labels are function-local. + *types.PkgName: // PkgNames are file-local. + return "", fmt.Errorf("no path for %v", obj) + + case *types.Var: + // Could be: + // - a field (obj.IsField()) + // - a func parameter or result + // - a local var. + // Sadly there is no way to distinguish + // a param/result from a local + // so we must proceed to the find. + + case *types.Func: + // A func, if not package-level, must be a method. + if recv := obj.Type().(*types.Signature).Recv(); recv == nil { + return "", fmt.Errorf("func is not a method: %v", obj) + } + + if path, ok := enc.concreteMethod(obj); ok { + // Fast path for concrete methods that avoids looping over scope. + return path, nil + } + + default: + panic(obj) + } + + // 4. 
Search the API for the path to the var (field/param/result) or method. + + // First inspect package-level named types. + // In the presence of path aliases, these give + // the best paths because non-types may + // refer to types, but not the reverse. + empty := make([]byte, 0, 48) // initial space + objs := enc.scopeObjects(scope) + for _, o := range objs { + tname, ok := o.(*types.TypeName) + if !ok { + continue // handle non-types in second pass + } + + path := append(empty, o.Name()...) + path = append(path, opType) + + T := o.Type() + + if tname.IsAlias() { + // type alias + if r := find(obj, T, path, nil); r != nil { + return Path(r), nil + } + } else { + if named, _ := T.(*types.Named); named != nil { + if r := findTypeParam(obj, typeparams.ForNamed(named), path, nil); r != nil { + // generic named type + return Path(r), nil + } + } + // defined (named) type + if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil { + return Path(r), nil + } + } + } + + // Then inspect everything else: + // non-types, and declared methods of defined types. + for _, o := range objs { + path := append(empty, o.Name()...) + if _, ok := o.(*types.TypeName); !ok { + if o.Exported() { + // exported non-type (const, var, func) + if r := find(obj, o.Type(), append(path, opType), nil); r != nil { + return Path(r), nil + } + } + continue + } + + // Inspect declared methods of defined types. + if T, ok := o.Type().(*types.Named); ok { + path = append(path, opType) + if !enc.skipMethodSorting { + // Note that method index here is always with respect + // to canonical ordering of methods, regardless of how + // they appear in the underlying type. + for i, m := range enc.namedMethods(T) { + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return Path(path2), nil // found declared method + } + if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { + return Path(r), nil + } + } + } else { + // This branch must match the logic in the branch above, using go/types + // APIs without sorting. + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return Path(path2), nil // found declared method + } + if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { + return Path(r), nil + } + } + } + } + } + + return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path()) +} + +func appendOpArg(path []byte, op byte, arg int) []byte { + path = append(path, op) + path = strconv.AppendInt(path, int64(arg), 10) + return path +} + +// concreteMethod returns the path for meth, which must have a non-nil receiver. +// The second return value indicates success and may be false if the method is +// an interface method or if it is an instantiated method. +// +// This function is just an optimization that avoids the general scope walking +// approach. You are expected to fall back to the general approach if this +// function fails. +func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { + // Concrete methods can only be declared on package-scoped named types. For + // that reason we can skip the expensive walk over the package scope: the + // path will always be package -> named type -> method. We can trivially get + // the type name from the receiver, and only have to look over the type's + // methods to find the method index. + // + // Methods on generic types require special consideration, however. 
Consider + // the following package: + // + // L1: type S[T any] struct{} + // L2: func (recv S[A]) Foo() { recv.Bar() } + // L3: func (recv S[B]) Bar() { } + // L4: type Alias = S[int] + // L5: func _[T any]() { var s S[int]; s.Foo() } + // + // The receivers of methods on generic types are instantiations. L2 and L3 + // instantiate S with the type-parameters A and B, which are scoped to the + // respective methods. L4 and L5 each instantiate S with int. Each of these + // instantiations has its own method set, full of methods (and thus objects) + // with receivers whose types are the respective instantiations. In other + // words, we have + // + // S[A].Foo, S[A].Bar + // S[B].Foo, S[B].Bar + // S[int].Foo, S[int].Bar + // + // We may thus be trying to produce object paths for any of these objects. + // + // S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo + // and S.Bar, which are the paths that this function naturally produces. + // + // S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that + // don't correspond to the origin methods. For S[int], this is significant. + // The most precise object path for S[int].Foo, for example, is Alias.Foo, + // not S.Foo. Our function, however, would produce S.Foo, which would + // resolve to a different object. + // + // For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are + // still the correct paths, since only the origin methods have meaningful + // paths. But this is likely only true for trivial cases and has edge cases. + // Since this function is only an optimization, we err on the side of giving + // up, deferring to the slower but definitely correct algorithm. Most users + // of objectpath will only be giving us origin methods, anyway, as referring + // to instantiated methods is usually not useful. + + if typeparams.OriginMethod(meth) != meth { + return "", false + } + + recvT := meth.Type().(*types.Signature).Recv().Type() + if ptr, ok := recvT.(*types.Pointer); ok { + recvT = ptr.Elem() + } + + named, ok := recvT.(*types.Named) + if !ok { + return "", false + } + + if types.IsInterface(named) { + // Named interfaces don't have to be package-scoped + // + // TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface + // methods, too, I think. + return "", false + } + + // Preallocate space for the name, opType, opMethod, and some digits. + name := named.Obj().Name() + path := make([]byte, 0, len(name)+8) + path = append(path, name...) + path = append(path, opType) + + if !enc.skipMethodSorting { + for i, m := range enc.namedMethods(named) { + if m == meth { + path = appendOpArg(path, opMethod, i) + return Path(path), true + } + } + } else { + // This branch must match the logic of the branch above, using go/types + // APIs without sorting. + for i := 0; i < named.NumMethods(); i++ { + m := named.Method(i) + if m == meth { + path = appendOpArg(path, opMethod, i) + return Path(path), true + } + } + } + + // Due to golang/go#59944, go/types fails to associate the receiver with + // certain methods on cgo types. + // + // TODO(rfindley): replace this panic once golang/go#59944 is fixed in all Go + // versions gopls supports. + return "", false + // panic(fmt.Sprintf("couldn't find method %s on type %s; methods: %#v", meth, named, enc.namedMethods(named))) +} + +// find finds obj within type T, returning the path to it, or nil if not found. +// +// The seen map is used to short circuit cycles through type parameters. 
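
[Illustrative aside: a path is just a byte string built with the appendOpArg helper above, one operator letter followed by decimal digits for its operand. A sketch that assembles the doc comment's example path by hand; the letters match the op constants declared earlier.]

package main

import (
	"fmt"
	"strconv"
)

// appendOpArg mirrors the helper above: one operator byte, then its
// decimal operand.
func appendOpArg(path []byte, op byte, arg int) []byte {
	path = append(path, op)
	return strconv.AppendInt(path, int64(arg), 10)
}

func main() {
	// Start from the package-level name "T", then alternate
	// object->type ('.'), type->type, and type->object steps.
	p := []byte("T")
	p = append(p, '.', 'U')    // .Type().Underlying()
	p = appendOpArg(p, 'M', 0) // .Method(0)   -> method f
	p = append(p, '.', 'R')    // .Type().Results()
	p = appendOpArg(p, 'A', 1) // .At(1)       -> result var b
	p = append(p, '.')         // .Type()
	p = appendOpArg(p, 'F', 0) // .Field(0)    -> field X
	fmt.Println(string(p))     // T.UM0.RA1.F0
}
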
If +// nil, it will be allocated as necessary. +func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { + switch T := T.(type) { + case *types.Basic, *types.Named: + // Named types belonging to pkg were handled already, + // so T must belong to another package. No path. + return nil + case *types.Pointer: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Slice: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Array: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Chan: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Map: + if r := find(obj, T.Key(), append(path, opKey), seen); r != nil { + return r + } + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Signature: + if r := findTypeParam(obj, typeparams.ForSignature(T), path, seen); r != nil { + return r + } + if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { + return r + } + return find(obj, T.Results(), append(path, opResults), seen) + case *types.Struct: + for i := 0; i < T.NumFields(); i++ { + fld := T.Field(i) + path2 := appendOpArg(path, opField, i) + if fld == obj { + return path2 // found field var + } + if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *types.Tuple: + for i := 0; i < T.Len(); i++ { + v := T.At(i) + path2 := appendOpArg(path, opAt, i) + if v == obj { + return path2 // found param/result var + } + if r := find(obj, v.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *types.Interface: + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return path2 // found interface method + } + if r := find(obj, m.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *typeparams.TypeParam: + name := T.Obj() + if name == obj { + return append(path, opObj) + } + if seen[name] { + return nil + } + if seen == nil { + seen = make(map[*types.TypeName]bool) + } + seen[name] = true + if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil { + return r + } + return nil + } + panic(T) +} + +func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { + for i := 0; i < list.Len(); i++ { + tparam := list.At(i) + path2 := appendOpArg(path, opTypeParam, i) + if r := find(obj, tparam, path2, seen); r != nil { + return r + } + } + return nil +} + +// Object returns the object denoted by path p within the package pkg. +func Object(pkg *types.Package, p Path) (types.Object, error) { + return object(pkg, p, false) +} + +// Note: the skipMethodSorting parameter must match the value of +// Encoder.skipMethodSorting used during encoding. +func object(pkg *types.Package, p Path, skipMethodSorting bool) (types.Object, error) { + if p == "" { + return nil, fmt.Errorf("empty path") + } + + pathstr := string(p) + var pkgobj, suffix string + if dot := strings.IndexByte(pathstr, opType); dot < 0 { + pkgobj = pathstr + } else { + pkgobj = pathstr[:dot] + suffix = pathstr[dot:] // suffix starts with "." 
+ } + + obj := pkg.Scope().Lookup(pkgobj) + if obj == nil { + return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj) + } + + // abstraction of *types.{Pointer,Slice,Array,Chan,Map} + type hasElem interface { + Elem() types.Type + } + // abstraction of *types.{Named,Signature} + type hasTypeParams interface { + TypeParams() *typeparams.TypeParamList + } + // abstraction of *types.{Named,TypeParam} + type hasObj interface { + Obj() *types.TypeName + } + + // The loop state is the pair (t, obj), + // exactly one of which is non-nil, initially obj. + // All suffixes start with '.' (the only object->type operation), + // followed by optional type->type operations, + // then a type->object operation. + // The cycle then repeats. + var t types.Type + for suffix != "" { + code := suffix[0] + suffix = suffix[1:] + + // Codes [AFM] have an integer operand. + var index int + switch code { + case opAt, opField, opMethod, opTypeParam: + rest := strings.TrimLeft(suffix, "0123456789") + numerals := suffix[:len(suffix)-len(rest)] + suffix = rest + i, err := strconv.Atoi(numerals) + if err != nil { + return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code) + } + index = int(i) + case opObj: + // no operand + default: + // The suffix must end with a type->object operation. + if suffix == "" { + return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code) + } + } + + if code == opType { + if t != nil { + return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType) + } + t = obj.Type() + obj = nil + continue + } + + if t == nil { + return nil, fmt.Errorf("invalid path: code %q in object context", code) + } + + // Inv: t != nil, obj == nil + + switch code { + case opElem: + hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t) + } + t = hasElem.Elem() + + case opKey: + mapType, ok := t.(*types.Map) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t) + } + t = mapType.Key() + + case opParams: + sig, ok := t.(*types.Signature) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + t = sig.Params() + + case opResults: + sig, ok := t.(*types.Signature) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + t = sig.Results() + + case opUnderlying: + named, ok := t.(*types.Named) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t) + } + t = named.Underlying() + + case opTypeParam: + hasTypeParams, ok := t.(hasTypeParams) // Named, Signature + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t) + } + tparams := hasTypeParams.TypeParams() + if n := tparams.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + t = tparams.At(index) + + case opConstraint: + tparam, ok := t.(*typeparams.TypeParam) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t) + } + t = tparam.Constraint() + + case opAt: + tuple, ok := t.(*types.Tuple) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t) + } + if n := tuple.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + obj = tuple.At(index) + t = nil + + case opField: + 
structType, ok := t.(*types.Struct) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t) + } + if n := structType.NumFields(); index >= n { + return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n) + } + obj = structType.Field(index) + t = nil + + case opMethod: + switch t := t.(type) { + case *types.Interface: + if index >= t.NumMethods() { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) + } + obj = t.Method(index) // Id-ordered + + case *types.Named: + if index >= t.NumMethods() { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) + } + if skipMethodSorting { + obj = t.Method(index) + } else { + methods := namedMethods(t) // (unmemoized) + obj = methods[index] // Id-ordered + } + + default: + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t) + } + t = nil + + case opObj: + hasObj, ok := t.(hasObj) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t) + } + obj = hasObj.Obj() + t = nil + + default: + return nil, fmt.Errorf("invalid path: unknown code %q", code) + } + } + + if obj.Pkg() != pkg { + return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) + } + + return obj, nil // success +} + +// namedMethods returns the methods of a Named type in ascending Id order. +func namedMethods(named *types.Named) []*types.Func { + methods := make([]*types.Func, named.NumMethods()) + for i := range methods { + methods[i] = named.Method(i) + } + sort.Slice(methods, func(i, j int) bool { + return methods[i].Id() < methods[j].Id() + }) + return methods +} + +// namedMethods is a memoization of the namedMethods function. Callers must not modify the result. +func (enc *Encoder) namedMethods(named *types.Named) []*types.Func { + m := enc.namedMethodsMemo + if m == nil { + m = make(map[*types.Named][]*types.Func) + enc.namedMethodsMemo = m + } + methods, ok := m[named] + if !ok { + methods = namedMethods(named) // allocates and sorts + m[named] = methods + } + return methods +} + +// scopeObjects is a memoization of scope objects. +// Callers must not modify the result. +func (enc *Encoder) scopeObjects(scope *types.Scope) []types.Object { + m := enc.scopeMemo + if m == nil { + m = make(map[*types.Scope][]types.Object) + enc.scopeMemo = m + } + objs, ok := m[scope] + if !ok { + names := scope.Names() // allocates and sorts + objs = make([]types.Object, len(names)) + for i, name := range names { + objs[i] = scope.Lookup(name) + } + m[scope] = objs + } + return objs +} diff --git a/agent/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/agent/vendor/golang.org/x/tools/internal/gcimporter/bimport.go new file mode 100644 index 00000000000..d98b0db2a9a --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -0,0 +1,150 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains the remaining vestiges of +// $GOROOT/src/go/internal/gcimporter/bimport.go. 
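
[Illustrative aside on why the Encoder form exists: reusing one Encoder across many objects lets the scope and method tables memoized above be built once rather than once per call. A sketch under those assumptions; the helper name printPaths is hypothetical, and the public golang.org/x/tools/go/types/objectpath import is assumed.]

package p

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/objectpath"
)

// printPaths (hypothetical helper) encodes every package-level object of
// pkg with one shared Encoder, so scopeObjects and namedMethods results
// are computed once and reused.
func printPaths(pkg *types.Package) {
	var enc objectpath.Encoder // the zero value is ready to use
	scope := pkg.Scope()
	for _, name := range scope.Names() {
		path, err := enc.For(scope.Lookup(name))
		if err != nil {
			continue // e.g. an unexported non-type has no path
		}
		fmt.Printf("%s\t%s\n", name, path)
	}
}
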
+ +package gcimporter + +import ( + "fmt" + "go/token" + "go/types" + "sync" +) + +func errorf(format string, args ...interface{}) { + panic(fmt.Sprintf(format, args...)) +} + +const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go + +// Synthesize a token.Pos +type fakeFileSet struct { + fset *token.FileSet + files map[string]*fileInfo +} + +type fileInfo struct { + file *token.File + lastline int +} + +const maxlines = 64 * 1024 + +func (s *fakeFileSet) pos(file string, line, column int) token.Pos { + // TODO(mdempsky): Make use of column. + + // Since we don't know the set of needed file positions, we reserve maxlines + // positions per file. We delay calling token.File.SetLines until all + // positions have been calculated (by way of fakeFileSet.setLines), so that + // we can avoid setting unnecessary lines. See also golang/go#46586. + f := s.files[file] + if f == nil { + f = &fileInfo{file: s.fset.AddFile(file, -1, maxlines)} + s.files[file] = f + } + if line > maxlines { + line = 1 + } + if line > f.lastline { + f.lastline = line + } + + // Return a fake position assuming that f.file consists only of newlines. + return token.Pos(f.file.Base() + line - 1) +} + +func (s *fakeFileSet) setLines() { + fakeLinesOnce.Do(func() { + fakeLines = make([]int, maxlines) + for i := range fakeLines { + fakeLines[i] = i + } + }) + for _, f := range s.files { + f.file.SetLines(fakeLines[:f.lastline]) + } +} + +var ( + fakeLines []int + fakeLinesOnce sync.Once +) + +func chanDir(d int) types.ChanDir { + // tag values must match the constants in cmd/compile/internal/gc/go.go + switch d { + case 1 /* Crecv */ : + return types.RecvOnly + case 2 /* Csend */ : + return types.SendOnly + case 3 /* Cboth */ : + return types.SendRecv + default: + errorf("unexpected channel dir %d", d) + return 0 + } +} + +var predeclOnce sync.Once +var predecl []types.Type // initialized lazily + +func predeclared() []types.Type { + predeclOnce.Do(func() { + // initialize lazily to be sure that all + // elements have been initialized before + predecl = []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + } + predecl = append(predecl, additionalPredeclared()...) 
+ }) + return predecl +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/agent/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/agent/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go new file mode 100644 index 00000000000..f6437feb1cf --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go @@ -0,0 +1,99 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. + +// This file implements FindExportData. + +package gcimporter + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" +) + +func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) { + // See $GOROOT/include/ar.h. + hdr := make([]byte, 16+12+6+6+8+10+2) + _, err = io.ReadFull(r, hdr) + if err != nil { + return + } + // leave for debugging + if false { + fmt.Printf("header: %s", hdr) + } + s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) + length, err := strconv.Atoi(s) + size = int64(length) + if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { + err = fmt.Errorf("invalid archive header") + return + } + name = strings.TrimSpace(string(hdr[:16])) + return +} + +// FindExportData positions the reader r at the beginning of the +// export data section of an underlying GC-created object/archive +// file by reading from it. The reader must be positioned at the +// start of the file before calling this function. The hdr result +// is the string before the export data, either "$$" or "$$B". +// The size result is the length of the export data in bytes, or -1 if not known. +func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { + // Read first line to make sure this is an object file. + line, err := r.ReadSlice('\n') + if err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + + if string(line) == "!\n" { + // Archive file. Scan to __.PKGDEF. + var name string + if name, size, err = readGopackHeader(r); err != nil { + return + } + + // First entry should be __.PKGDEF. + if name != "__.PKGDEF" { + err = fmt.Errorf("go archive is missing __.PKGDEF") + return + } + + // Read first line of __.PKGDEF data, so that line + // is once again the first line of the input. + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + size -= int64(len(line)) + } + + // Now at __.PKGDEF in archive or still at beginning of file. + // Either way, line should begin with "go object ". + if !strings.HasPrefix(string(line), "go object ") { + err = fmt.Errorf("not a Go object file") + return + } + + // Skip over object header to export data. + // Begins after first line starting with $$. + for line[0] != '$' { + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + size -= int64(len(line)) + } + hdr = string(line) + if size < 0 { + size = -1 + } + + return +} diff --git a/agent/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/agent/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go new file mode 100644 index 00000000000..b1223713b94 --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -0,0 +1,274 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a reduced copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go. + +// Package gcimporter provides various functions for reading +// gc-generated object files that can be used to implement the +// Importer interface defined by the Go 1.5 standard library package. +// +// The encoding is deterministic: if the encoder is applied twice to +// the same types.Package data structure, both encodings are equal. +// This property may be important to avoid spurious changes in +// applications such as build systems. +// +// However, the encoder is not necessarily idempotent. Importing an +// exported package may yield a types.Package that, while it +// represents the same set of Go types as the original, may differ in +// the details of its internal representation. Because of these +// differences, re-encoding the imported package may yield a +// different, but equally valid, encoding of the package. +package gcimporter // import "golang.org/x/tools/internal/gcimporter" + +import ( + "bufio" + "bytes" + "fmt" + "go/build" + "go/token" + "go/types" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" +) + +const ( + // Enable debug during development: it adds some additional checks, and + // prevents errors from being recovered. + debug = false + + // If trace is set, debugging output is printed to std out. + trace = false +) + +var exportMap sync.Map // package dir → func() (string, bool) + +// lookupGorootExport returns the location of the export data +// (normally found in the build cache, but located in GOROOT/pkg +// in prior Go releases) for the package located in pkgDir. +// +// (We use the package's directory instead of its import path +// mainly to simplify handling of the packages in src/vendor +// and cmd/vendor.) +func lookupGorootExport(pkgDir string) (string, bool) { + f, ok := exportMap.Load(pkgDir) + if !ok { + var ( + listOnce sync.Once + exportPath string + ) + f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) { + listOnce.Do(func() { + cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir) + cmd.Dir = build.Default.GOROOT + var output []byte + output, err := cmd.Output() + if err != nil { + return + } + + exports := strings.Split(string(bytes.TrimSpace(output)), "\n") + if len(exports) != 1 { + return + } + + exportPath = exports[0] + }) + + return exportPath, exportPath != "" + }) + } + + return f.(func() (string, bool))() +} + +var pkgExts = [...]string{".a", ".o"} + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. +// If no file was found, an empty filename is returned. +func FindPkg(path, srcDir string) (filename, id string) { + if path == "" { + return + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present. 
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + var ok bool + if bp.Goroot && bp.Dir != "" { + filename, ok = lookupGorootExport(bp.Dir) + } + if !ok { + id = path // make sure we have an id to print in error message + return + } + } else { + noext = strings.TrimSuffix(bp.PkgObj, ".a") + id = bp.ImportPath + } + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + if filename != "" { + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + filename = "" // not found + return +} + +// Import imports a gc-generated package given its import path and srcDir, adds +// the corresponding package object to the packages map, and returns the object. +// The packages map must contain all packages already imported. +func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { + var rc io.ReadCloser + var filename, id string + if lookup != nil { + // With custom lookup specified, assume that caller has + // converted path to a canonical import path for use in the map. + if path == "unsafe" { + return types.Unsafe, nil + } + id = path + + // No need to re-import if the package was imported completely before. + if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + f, err := lookup(path) + if err != nil { + return nil, err + } + rc = f + } else { + filename, id = FindPkg(path, srcDir) + if filename == "" { + if path == "unsafe" { + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %q", id) + } + + // no need to re-import if the package was imported completely before + if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + // add file name to error + err = fmt.Errorf("%s: %v", filename, err) + } + }() + rc = f + } + defer rc.Close() + + var hdr string + var size int64 + buf := bufio.NewReader(rc) + if hdr, size, err = FindExportData(buf); err != nil { + return + } + + switch hdr { + case "$$B\n": + var data []byte + data, err = ioutil.ReadAll(buf) + if err != nil { + break + } + + // TODO(gri): allow clients of go/importer to provide a FileSet. + // Or, define a new standard go/types/gcexportdata package. + fset := token.NewFileSet() + + // Select appropriate importer. 
+ if len(data) > 0 { + switch data[0] { + case 'v', 'c', 'd': // binary, till go1.10 + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) + + case 'i': // indexed, till go1.19 + _, pkg, err := IImportData(fset, packages, data[1:], id) + return pkg, err + + case 'u': // unified, from go1.20 + _, pkg, err := UImportData(fset, packages, data[1:size], id) + return pkg, err + + default: + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) + } + } + + default: + err = fmt.Errorf("unknown export data header: %q", hdr) + } + + return +} + +func deref(typ types.Type) types.Type { + if p, _ := typ.(*types.Pointer); p != nil { + return p.Elem() + } + return typ +} + +type byPath []*types.Package + +func (a byPath) Len() int { return len(a) } +func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/agent/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/agent/vendor/golang.org/x/tools/internal/gcimporter/iexport.go new file mode 100644 index 00000000000..6103dd7102b --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -0,0 +1,1322 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed binary package export. +// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; +// see that file for specification of the format. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "io" + "math/big" + "reflect" + "sort" + "strconv" + "strings" + + "golang.org/x/tools/go/types/objectpath" + "golang.org/x/tools/internal/tokeninternal" + "golang.org/x/tools/internal/typeparams" +) + +// IExportShallow encodes "shallow" export data for the specified package. +// +// No promises are made about the encoding other than that it can be decoded by +// the same version of IIExportShallow. If you plan to save export data in the +// file system, be sure to include a cryptographic digest of the executable in +// the key to avoid version skew. +// +// If the provided reportf func is non-nil, it will be used for reporting bugs +// encountered during export. +// TODO(rfindley): remove reportf when we are confident enough in the new +// objectpath encoding. +func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) ([]byte, error) { + // In principle this operation can only fail if out.Write fails, + // but that's impossible for bytes.Buffer---and as a matter of + // fact iexportCommon doesn't even check for I/O errors. + // TODO(adonovan): handle I/O errors properly. + // TODO(adonovan): use byte slices throughout, avoiding copying. + const bundle, shallow = false, true + var out bytes.Buffer + err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}) + return out.Bytes(), err +} + +// IImportShallow decodes "shallow" types.Package data encoded by +// IExportShallow in the same executable. This function cannot import data from +// cmd/compile or gcexportdata.Write. +// +// The importer calls getPackages to obtain package symbols for all +// packages mentioned in the export data, including the one being +// decoded. 
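
[Illustrative aside: outside the vendor tree, the FindPkg/FindExportData/Import machinery above is normally reached through the public wrapper golang.org/x/tools/go/gcexportdata rather than by calling this internal package. A sketch of that route; whether Find locates export data for "fmt" depends on the local toolchain and build cache.]

package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	// Locate export data for a package (the public counterpart of the
	// FindPkg/FindExportData steps above), then decode it.
	filename, path := gcexportdata.Find("fmt", ".")
	if filename == "" {
		log.Fatal("no export data found for fmt")
	}
	f, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	r, err := gcexportdata.NewReader(f) // skips the archive/object header
	if err != nil {
		log.Fatal(err)
	}

	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), "with", len(pkg.Scope().Names()), "package-level names")
}
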
+// +// If the provided reportf func is non-nil, it will be used for reporting bugs +// encountered during import. +// TODO(rfindley): remove reportf when we are confident enough in the new +// objectpath encoding. +func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, path string, reportf ReportFunc) (*types.Package, error) { + const bundle = false + const shallow = true + pkgs, err := iimportCommon(fset, getPackages, data, bundle, path, shallow, reportf) + if err != nil { + return nil, err + } + return pkgs[0], nil +} + +// ReportFunc is the type of a function used to report formatted bugs. +type ReportFunc = func(string, ...interface{}) + +// Current bundled export format version. Increase with each format change. +// 0: initial implementation +const bundleVersion = 0 + +// IExportData writes indexed export data for pkg to out. +// +// If no file set is provided, position info will be missing. +// The package path of the top-level package will not be recorded, +// so that calls to IImportData can override with a provided package path. +func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error { + const bundle, shallow = false, false + return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}) +} + +// IExportBundle writes an indexed export bundle for pkgs to out. +func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { + const bundle, shallow = true, false + return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs) +} + +func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package) (err error) { + if !debug { + defer func() { + if e := recover(); e != nil { + if ierr, ok := e.(internalError); ok { + err = ierr + return + } + // Not an internal error; panic again. + panic(e) + } + }() + } + + p := iexporter{ + fset: fset, + version: version, + shallow: shallow, + allPkgs: map[*types.Package]bool{}, + stringIndex: map[string]uint64{}, + declIndex: map[types.Object]uint64{}, + tparamNames: map[types.Object]string{}, + typIndex: map[types.Type]uint64{}, + } + if !bundle { + p.localpkg = pkgs[0] + } + + for i, pt := range predeclared() { + p.typIndex[pt] = uint64(i) + } + if len(p.typIndex) > predeclReserved { + panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)) + } + + // Initialize work queue with exported declarations. + for _, pkg := range pkgs { + scope := pkg.Scope() + for _, name := range scope.Names() { + if token.IsExported(name) { + p.pushDecl(scope.Lookup(name)) + } + } + + if bundle { + // Ensure pkg and its imports are included in the index. + p.allPkgs[pkg] = true + for _, imp := range pkg.Imports() { + p.allPkgs[imp] = true + } + } + } + + // Loop until no more work. + for !p.declTodo.empty() { + p.doDecl(p.declTodo.popHead()) + } + + // Produce index of offset of each file record in files. + var files intWriter + var fileOffset []uint64 // fileOffset[i] is offset in files of file encoded as i + if p.shallow { + fileOffset = make([]uint64, len(p.fileInfos)) + for i, info := range p.fileInfos { + fileOffset[i] = uint64(files.Len()) + p.encodeFile(&files, info.file, info.needed) + } + } + + // Append indices to data0 section. 
+ dataLen := uint64(p.data0.Len()) + w := p.newWriter() + w.writeIndex(p.declIndex) + + if bundle { + w.uint64(uint64(len(pkgs))) + for _, pkg := range pkgs { + w.pkg(pkg) + imps := pkg.Imports() + w.uint64(uint64(len(imps))) + for _, imp := range imps { + w.pkg(imp) + } + } + } + w.flush() + + // Assemble header. + var hdr intWriter + if bundle { + hdr.uint64(bundleVersion) + } + hdr.uint64(uint64(p.version)) + hdr.uint64(uint64(p.strings.Len())) + if p.shallow { + hdr.uint64(uint64(files.Len())) + hdr.uint64(uint64(len(fileOffset))) + for _, offset := range fileOffset { + hdr.uint64(offset) + } + } + hdr.uint64(dataLen) + + // Flush output. + io.Copy(out, &hdr) + io.Copy(out, &p.strings) + if p.shallow { + io.Copy(out, &files) + } + io.Copy(out, &p.data0) + + return nil +} + +// encodeFile writes to w a representation of the file sufficient to +// faithfully restore position information about all needed offsets. +// Mutates the needed array. +func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) { + _ = needed[0] // precondition: needed is non-empty + + w.uint64(p.stringOff(file.Name())) + + size := uint64(file.Size()) + w.uint64(size) + + // Sort the set of needed offsets. Duplicates are harmless. + sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] }) + + lines := tokeninternal.GetLines(file) // byte offset of each line start + w.uint64(uint64(len(lines))) + + // Rather than record the entire array of line start offsets, + // we save only a sparse list of (index, offset) pairs for + // the start of each line that contains a needed position. + var sparse [][2]int // (index, offset) pairs +outer: + for i, lineStart := range lines { + lineEnd := size + if i < len(lines)-1 { + lineEnd = uint64(lines[i+1]) + } + // Does this line contains a needed offset? + if needed[0] < lineEnd { + sparse = append(sparse, [2]int{i, lineStart}) + for needed[0] < lineEnd { + needed = needed[1:] + if len(needed) == 0 { + break outer + } + } + } + } + + // Delta-encode the columns. + w.uint64(uint64(len(sparse))) + var prev [2]int + for _, pair := range sparse { + w.uint64(uint64(pair[0] - prev[0])) + w.uint64(uint64(pair[1] - prev[1])) + prev = pair + } +} + +// writeIndex writes out an object index. mainIndex indicates whether +// we're writing out the main index, which is also read by +// non-compiler tools and includes a complete package description +// (i.e., name and height). +func (w *exportWriter) writeIndex(index map[types.Object]uint64) { + type pkgObj struct { + obj types.Object + name string // qualified name; differs from obj.Name for type params + } + // Build a map from packages to objects from that package. + pkgObjs := map[*types.Package][]pkgObj{} + + // For the main index, make sure to include every package that + // we reference, even if we're not exporting (or reexporting) + // any symbols from it. 
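
[Illustrative aside on encodeFile above: only lines that contain a needed offset are kept, as delta-encoded (index, offset) pairs. A toy run with made-up line starts and offsets.]

package main

import (
	"fmt"
	"sort"
)

func main() {
	lines := []int{0, 10, 25, 40, 90}  // byte offset of each line start
	size := 120                        // file size in bytes
	needed := []uint64{12, 14, 95}     // positions that must be reproducible
	sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] })

	// Keep one (index, offset) pair per line containing a needed offset.
	var sparse [][2]int
outer:
	for i, lineStart := range lines {
		lineEnd := uint64(size)
		if i < len(lines)-1 {
			lineEnd = uint64(lines[i+1])
		}
		if needed[0] < lineEnd {
			sparse = append(sparse, [2]int{i, lineStart})
			for needed[0] < lineEnd {
				needed = needed[1:]
				if len(needed) == 0 {
					break outer
				}
			}
		}
	}
	fmt.Println(sparse) // [[1 10] [4 90]]: only lines 1 and 4 are recorded

	// Delta-encode the pairs, as the writer above does.
	var prev [2]int
	for _, pair := range sparse {
		fmt.Println(pair[0]-prev[0], pair[1]-prev[1]) // 1 10, then 3 80
		prev = pair
	}
}
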
+ if w.p.localpkg != nil { + pkgObjs[w.p.localpkg] = nil + } + for pkg := range w.p.allPkgs { + pkgObjs[pkg] = nil + } + + for obj := range index { + name := w.p.exportName(obj) + pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], pkgObj{obj, name}) + } + + var pkgs []*types.Package + for pkg, objs := range pkgObjs { + pkgs = append(pkgs, pkg) + + sort.Slice(objs, func(i, j int) bool { + return objs[i].name < objs[j].name + }) + } + + sort.Slice(pkgs, func(i, j int) bool { + return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j]) + }) + + w.uint64(uint64(len(pkgs))) + for _, pkg := range pkgs { + w.string(w.exportPath(pkg)) + w.string(pkg.Name()) + w.uint64(uint64(0)) // package height is not needed for go/types + + objs := pkgObjs[pkg] + w.uint64(uint64(len(objs))) + for _, obj := range objs { + w.string(obj.name) + w.uint64(index[obj.obj]) + } + } +} + +// exportName returns the 'exported' name of an object. It differs from +// obj.Name() only for type parameters (see tparamExportName for details). +func (p *iexporter) exportName(obj types.Object) (res string) { + if name := p.tparamNames[obj]; name != "" { + return name + } + return obj.Name() +} + +type iexporter struct { + fset *token.FileSet + out *bytes.Buffer + version int + + shallow bool // don't put types from other packages in the index + objEncoder *objectpath.Encoder // encodes objects from other packages in shallow mode; lazily allocated + localpkg *types.Package // (nil in bundle mode) + + // allPkgs tracks all packages that have been referenced by + // the export data, so we can ensure to include them in the + // main index. + allPkgs map[*types.Package]bool + + declTodo objQueue + + strings intWriter + stringIndex map[string]uint64 + + // In shallow mode, object positions are encoded as (file, offset). + // Each file is recorded as a line-number table. + // Only the lines of needed positions are saved faithfully. + fileInfo map[*token.File]uint64 // value is index in fileInfos + fileInfos []*filePositions + + data0 intWriter + declIndex map[types.Object]uint64 + tparamNames map[types.Object]string // typeparam->exported name + typIndex map[types.Type]uint64 + + indent int // for tracing support +} + +type filePositions struct { + file *token.File + needed []uint64 // unordered list of needed file offsets +} + +func (p *iexporter) trace(format string, args ...interface{}) { + if !trace { + // Call sites should also be guarded, but having this check here allows + // easily enabling/disabling debug trace statements. + return + } + fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...) +} + +// objectpathEncoder returns the lazily allocated objectpath.Encoder to use +// when encoding objects in other packages during shallow export. +// +// Using a shared Encoder amortizes some of cost of objectpath search. +func (p *iexporter) objectpathEncoder() *objectpath.Encoder { + if p.objEncoder == nil { + p.objEncoder = new(objectpath.Encoder) + } + return p.objEncoder +} + +// stringOff returns the offset of s within the string section. +// If not already present, it's added to the end. +func (p *iexporter) stringOff(s string) uint64 { + off, ok := p.stringIndex[s] + if !ok { + off = uint64(p.strings.Len()) + p.stringIndex[s] = off + + p.strings.uint64(uint64(len(s))) + p.strings.WriteString(s) + } + return off +} + +// fileIndexAndOffset returns the index of the token.File and the byte offset of pos within it. 
+func (p *iexporter) fileIndexAndOffset(file *token.File, pos token.Pos) (uint64, uint64) { + index, ok := p.fileInfo[file] + if !ok { + index = uint64(len(p.fileInfo)) + p.fileInfos = append(p.fileInfos, &filePositions{file: file}) + if p.fileInfo == nil { + p.fileInfo = make(map[*token.File]uint64) + } + p.fileInfo[file] = index + } + // Record each needed offset. + info := p.fileInfos[index] + offset := uint64(file.Offset(pos)) + info.needed = append(info.needed, offset) + + return index, offset +} + +// pushDecl adds n to the declaration work queue, if not already present. +func (p *iexporter) pushDecl(obj types.Object) { + // Package unsafe is known to the compiler and predeclared. + // Caller should not ask us to do export it. + if obj.Pkg() == types.Unsafe { + panic("cannot export package unsafe") + } + + // Shallow export data: don't index decls from other packages. + if p.shallow && obj.Pkg() != p.localpkg { + return + } + + if _, ok := p.declIndex[obj]; ok { + return + } + + p.declIndex[obj] = ^uint64(0) // mark obj present in work queue + p.declTodo.pushTail(obj) +} + +// exportWriter handles writing out individual data section chunks. +type exportWriter struct { + p *iexporter + + data intWriter + prevFile string + prevLine int64 + prevColumn int64 +} + +func (w *exportWriter) exportPath(pkg *types.Package) string { + if pkg == w.p.localpkg { + return "" + } + return pkg.Path() +} + +func (p *iexporter) doDecl(obj types.Object) { + if trace { + p.trace("exporting decl %v (%T)", obj, obj) + p.indent++ + defer func() { + p.indent-- + p.trace("=> %s", obj) + }() + } + w := p.newWriter() + + switch obj := obj.(type) { + case *types.Var: + w.tag('V') + w.pos(obj.Pos()) + w.typ(obj.Type(), obj.Pkg()) + + case *types.Func: + sig, _ := obj.Type().(*types.Signature) + if sig.Recv() != nil { + // We shouldn't see methods in the package scope, + // but the type checker may repair "func () F() {}" + // to "func (Invalid) F()" and then treat it like "func F()", + // so allow that. See golang/go#57729. + if sig.Recv().Type() != types.Typ[types.Invalid] { + panic(internalErrorf("unexpected method: %v", sig)) + } + } + + // Function. + if typeparams.ForSignature(sig).Len() == 0 { + w.tag('F') + } else { + w.tag('G') + } + w.pos(obj.Pos()) + // The tparam list of the function type is the declaration of the type + // params. So, write out the type params right now. Then those type params + // will be referenced via their type offset (via typOff) in all other + // places in the signature and function where they are used. + // + // While importing the type parameters, tparamList computes and records + // their export name, so that it can be later used when writing the index. + if tparams := typeparams.ForSignature(sig); tparams.Len() > 0 { + w.tparamList(obj.Name(), tparams, obj.Pkg()) + } + w.signature(sig) + + case *types.Const: + w.tag('C') + w.pos(obj.Pos()) + w.value(obj.Type(), obj.Val()) + + case *types.TypeName: + t := obj.Type() + + if tparam, ok := t.(*typeparams.TypeParam); ok { + w.tag('P') + w.pos(obj.Pos()) + constraint := tparam.Constraint() + if p.version >= iexportVersionGo1_18 { + implicit := false + if iface, _ := constraint.(*types.Interface); iface != nil { + implicit = typeparams.IsImplicit(iface) + } + w.bool(implicit) + } + w.typ(constraint, obj.Pkg()) + break + } + + if obj.IsAlias() { + w.tag('A') + w.pos(obj.Pos()) + w.typ(t, obj.Pkg()) + break + } + + // Defined type. 
+ named, ok := t.(*types.Named) + if !ok { + panic(internalErrorf("%s is not a defined type", t)) + } + + if typeparams.ForNamed(named).Len() == 0 { + w.tag('T') + } else { + w.tag('U') + } + w.pos(obj.Pos()) + + if typeparams.ForNamed(named).Len() > 0 { + // While importing the type parameters, tparamList computes and records + // their export name, so that it can be later used when writing the index. + w.tparamList(obj.Name(), typeparams.ForNamed(named), obj.Pkg()) + } + + underlying := obj.Type().Underlying() + w.typ(underlying, obj.Pkg()) + + if types.IsInterface(t) { + break + } + + n := named.NumMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := named.Method(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + + // Receiver type parameters are type arguments of the receiver type, so + // their name must be qualified before exporting recv. + if rparams := typeparams.RecvTypeParams(sig); rparams.Len() > 0 { + prefix := obj.Name() + "." + m.Name() + for i := 0; i < rparams.Len(); i++ { + rparam := rparams.At(i) + name := tparamExportName(prefix, rparam) + w.p.tparamNames[rparam.Obj()] = name + } + } + w.param(sig.Recv()) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected object: %v", obj)) + } + + p.declIndex[obj] = w.flush() +} + +func (w *exportWriter) tag(tag byte) { + w.data.WriteByte(tag) +} + +func (w *exportWriter) pos(pos token.Pos) { + if w.p.shallow { + w.posV2(pos) + } else if w.p.version >= iexportVersionPosCol { + w.posV1(pos) + } else { + w.posV0(pos) + } +} + +// posV2 encoding (used only in shallow mode) records positions as +// (file, offset), where file is the index in the token.File table +// (which records the file name and newline offsets) and offset is a +// byte offset. It effectively ignores //line directives. +func (w *exportWriter) posV2(pos token.Pos) { + if pos == token.NoPos { + w.uint64(0) + return + } + file := w.p.fset.File(pos) // fset must be non-nil + index, offset := w.p.fileIndexAndOffset(file, pos) + w.uint64(1 + index) + w.uint64(offset) +} + +func (w *exportWriter) posV1(pos token.Pos) { + if w.p.fset == nil { + w.int64(0) + return + } + + p := w.p.fset.Position(pos) + file := p.Filename + line := int64(p.Line) + column := int64(p.Column) + + deltaColumn := (column - w.prevColumn) << 1 + deltaLine := (line - w.prevLine) << 1 + + if file != w.prevFile { + deltaLine |= 1 + } + if deltaLine != 0 { + deltaColumn |= 1 + } + + w.int64(deltaColumn) + if deltaColumn&1 != 0 { + w.int64(deltaLine) + if deltaLine&1 != 0 { + w.string(file) + } + } + + w.prevFile = file + w.prevLine = line + w.prevColumn = column +} + +func (w *exportWriter) posV0(pos token.Pos) { + if w.p.fset == nil { + w.int64(0) + return + } + + p := w.p.fset.Position(pos) + file := p.Filename + line := int64(p.Line) + + // When file is the same as the last position (common case), + // we can save a few bytes by delta encoding just the line + // number. + // + // Note: Because data objects may be read out of order (or not + // at all), we can only apply delta encoding within a single + // object. This is handled implicitly by tracking prevFile and + // prevLine as fields of exportWriter. 
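
[Illustrative aside restating the posV1 scheme above with concrete numbers: the low bit of each delta signals whether the next, rarer field (line, then file) follows. The encoder type below is a toy, not part of the patch.]

package main

import "fmt"

type posEncoder struct {
	prevFile   string
	prevLine   int64
	prevColumn int64
}

// encode returns the fields that would be written for one position.
func (e *posEncoder) encode(file string, line, column int64) (out []interface{}) {
	deltaColumn := (column - e.prevColumn) << 1
	deltaLine := (line - e.prevLine) << 1
	if file != e.prevFile {
		deltaLine |= 1
	}
	if deltaLine != 0 {
		deltaColumn |= 1
	}
	out = append(out, deltaColumn)
	if deltaColumn&1 != 0 {
		out = append(out, deltaLine)
		if deltaLine&1 != 0 {
			out = append(out, file)
		}
	}
	e.prevFile, e.prevLine, e.prevColumn = file, line, column
	return out
}

func main() {
	var e posEncoder
	fmt.Println(e.encode("a.go", 10, 5)) // [11 21 a.go]  new file: all three fields
	fmt.Println(e.encode("a.go", 10, 9)) // [8]           same file and line: column only
	fmt.Println(e.encode("a.go", 12, 1)) // [-15 4]       same file, new line
}
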
+ + if file == w.prevFile { + delta := line - w.prevLine + w.int64(delta) + if delta == deltaNewFile { + w.int64(-1) + } + } else { + w.int64(deltaNewFile) + w.int64(line) // line >= 0 + w.string(file) + w.prevFile = file + } + w.prevLine = line +} + +func (w *exportWriter) pkg(pkg *types.Package) { + // Ensure any referenced packages are declared in the main index. + w.p.allPkgs[pkg] = true + + w.string(w.exportPath(pkg)) +} + +func (w *exportWriter) qualifiedType(obj *types.TypeName) { + name := w.p.exportName(obj) + + // Ensure any referenced declarations are written out too. + w.p.pushDecl(obj) + w.string(name) + w.pkg(obj.Pkg()) +} + +// TODO(rfindley): what does 'pkg' even mean here? It would be better to pass +// it in explicitly into signatures and structs that may use it for +// constructing fields. +func (w *exportWriter) typ(t types.Type, pkg *types.Package) { + w.data.uint64(w.p.typOff(t, pkg)) +} + +func (p *iexporter) newWriter() *exportWriter { + return &exportWriter{p: p} +} + +func (w *exportWriter) flush() uint64 { + off := uint64(w.p.data0.Len()) + io.Copy(&w.p.data0, &w.data) + return off +} + +func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 { + off, ok := p.typIndex[t] + if !ok { + w := p.newWriter() + w.doTyp(t, pkg) + off = predeclReserved + w.flush() + p.typIndex[t] = off + } + return off +} + +func (w *exportWriter) startType(k itag) { + w.data.uint64(uint64(k)) +} + +func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { + if trace { + w.p.trace("exporting type %s (%T)", t, t) + w.p.indent++ + defer func() { + w.p.indent-- + w.p.trace("=> %s", t) + }() + } + switch t := t.(type) { + case *types.Named: + if targs := typeparams.NamedTypeArgs(t); targs.Len() > 0 { + w.startType(instanceType) + // TODO(rfindley): investigate if this position is correct, and if it + // matters. + w.pos(t.Obj().Pos()) + w.typeList(targs, pkg) + w.typ(typeparams.NamedTypeOrigin(t), pkg) + return + } + w.startType(definedType) + w.qualifiedType(t.Obj()) + + case *typeparams.TypeParam: + w.startType(typeParamType) + w.qualifiedType(t.Obj()) + + case *types.Pointer: + w.startType(pointerType) + w.typ(t.Elem(), pkg) + + case *types.Slice: + w.startType(sliceType) + w.typ(t.Elem(), pkg) + + case *types.Array: + w.startType(arrayType) + w.uint64(uint64(t.Len())) + w.typ(t.Elem(), pkg) + + case *types.Chan: + w.startType(chanType) + // 1 RecvOnly; 2 SendOnly; 3 SendRecv + var dir uint64 + switch t.Dir() { + case types.RecvOnly: + dir = 1 + case types.SendOnly: + dir = 2 + case types.SendRecv: + dir = 3 + } + w.uint64(dir) + w.typ(t.Elem(), pkg) + + case *types.Map: + w.startType(mapType) + w.typ(t.Key(), pkg) + w.typ(t.Elem(), pkg) + + case *types.Signature: + w.startType(signatureType) + w.pkg(pkg) + w.signature(t) + + case *types.Struct: + w.startType(structType) + n := t.NumFields() + // Even for struct{} we must emit some qualifying package, because that's + // what the compiler does, and thus that's what the importer expects. + fieldPkg := pkg + if n > 0 { + fieldPkg = t.Field(0).Pkg() + } + if fieldPkg == nil { + // TODO(rfindley): improve this very hacky logic. + // + // The importer expects a package to be set for all struct types, even + // those with no fields. A better encoding might be to set NumFields + // before pkg. setPkg panics with a nil package, which may be possible + // to reach with invalid packages (and perhaps valid packages, too?), so + // (arbitrarily) set the localpkg if available. 
+ // + // Alternatively, we may be able to simply guarantee that pkg != nil, by + // reconsidering the encoding of constant values. + if w.p.shallow { + fieldPkg = w.p.localpkg + } else { + panic(internalErrorf("no package to set for empty struct")) + } + } + w.pkg(fieldPkg) + w.uint64(uint64(n)) + + for i := 0; i < n; i++ { + f := t.Field(i) + if w.p.shallow { + w.objectPath(f) + } + w.pos(f.Pos()) + w.string(f.Name()) // unexported fields implicitly qualified by prior setPkg + w.typ(f.Type(), fieldPkg) + w.bool(f.Anonymous()) + w.string(t.Tag(i)) // note (or tag) + } + + case *types.Interface: + w.startType(interfaceType) + w.pkg(pkg) + + n := t.NumEmbeddeds() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + ft := t.EmbeddedType(i) + tPkg := pkg + if named, _ := ft.(*types.Named); named != nil { + w.pos(named.Obj().Pos()) + } else { + w.pos(token.NoPos) + } + w.typ(ft, tPkg) + } + + // See comment for struct fields. In shallow mode we change the encoding + // for interface methods that are promoted from other packages. + + n = t.NumExplicitMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := t.ExplicitMethod(i) + if w.p.shallow { + w.objectPath(m) + } + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + w.signature(sig) + } + + case *typeparams.Union: + w.startType(unionType) + nt := t.Len() + w.uint64(uint64(nt)) + for i := 0; i < nt; i++ { + term := t.Term(i) + w.bool(term.Tilde()) + w.typ(term.Type(), pkg) + } + + default: + panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t))) + } +} + +// objectPath writes the package and objectPath to use to look up obj in a +// different package, when encoding in "shallow" mode. +// +// When doing a shallow import, the importer creates only the local package, +// and requests package symbols for dependencies from the client. +// However, certain types defined in the local package may hold objects defined +// (perhaps deeply) within another package. +// +// For example, consider the following: +// +// package a +// func F() chan * map[string] struct { X int } +// +// package b +// import "a" +// var B = a.F() +// +// In this example, the type of b.B holds fields defined in package a. +// In order to have the correct canonical objects for the field defined in the +// type of B, they are encoded as objectPaths and later looked up in the +// importer. The same problem applies to interface methods. +func (w *exportWriter) objectPath(obj types.Object) { + if obj.Pkg() == nil || obj.Pkg() == w.p.localpkg { + // obj.Pkg() may be nil for the builtin error.Error. + // In this case, or if obj is declared in the local package, no need to + // encode. + w.string("") + return + } + objectPath, err := w.p.objectpathEncoder().For(obj) + if err != nil { + // Fall back to the empty string, which will cause the importer to create a + // new object, which matches earlier behavior. Creating a new object is + // sufficient for many purposes (such as type checking), but causes certain + // references algorithms to fail (golang/go#60819). However, we didn't + // notice this problem during months of gopls@v0.12.0 testing. 
+ // + // TODO(golang/go#61674): this workaround is insufficient, as in the case + // where the field forwarded from an instantiated type that may not appear + // in the export data of the original package: + // + // // package a + // type A[P any] struct{ F P } + // + // // package b + // type B a.A[int] + // + // We need to update references algorithms not to depend on this + // de-duplication, at which point we may want to simply remove the + // workaround here. + w.string("") + return + } + w.string(string(objectPath)) + w.pkg(obj.Pkg()) +} + +func (w *exportWriter) signature(sig *types.Signature) { + w.paramList(sig.Params()) + w.paramList(sig.Results()) + if sig.Params().Len() > 0 { + w.bool(sig.Variadic()) + } +} + +func (w *exportWriter) typeList(ts *typeparams.TypeList, pkg *types.Package) { + w.uint64(uint64(ts.Len())) + for i := 0; i < ts.Len(); i++ { + w.typ(ts.At(i), pkg) + } +} + +func (w *exportWriter) tparamList(prefix string, list *typeparams.TypeParamList, pkg *types.Package) { + ll := uint64(list.Len()) + w.uint64(ll) + for i := 0; i < list.Len(); i++ { + tparam := list.At(i) + // Set the type parameter exportName before exporting its type. + exportName := tparamExportName(prefix, tparam) + w.p.tparamNames[tparam.Obj()] = exportName + w.typ(list.At(i), pkg) + } +} + +const blankMarker = "$" + +// tparamExportName returns the 'exported' name of a type parameter, which +// differs from its actual object name: it is prefixed with a qualifier, and +// blank type parameter names are disambiguated by their index in the type +// parameter list. +func tparamExportName(prefix string, tparam *typeparams.TypeParam) string { + assert(prefix != "") + name := tparam.Obj().Name() + if name == "_" { + name = blankMarker + strconv.Itoa(tparam.Index()) + } + return prefix + "." + name +} + +// tparamName returns the real name of a type parameter, after stripping its +// qualifying prefix and reverting blank-name encoding. See tparamExportName +// for details. +func tparamName(exportName string) string { + // Remove the "path" from the type param name that makes it unique. + ix := strings.LastIndex(exportName, ".") + if ix < 0 { + errorf("malformed type parameter export name %s: missing prefix", exportName) + } + name := exportName[ix+1:] + if strings.HasPrefix(name, blankMarker) { + return "_" + } + return name +} + +func (w *exportWriter) paramList(tup *types.Tuple) { + n := tup.Len() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + w.param(tup.At(i)) + } +} + +func (w *exportWriter) param(obj types.Object) { + w.pos(obj.Pos()) + w.localIdent(obj) + w.typ(obj.Type(), obj.Pkg()) +} + +func (w *exportWriter) value(typ types.Type, v constant.Value) { + w.typ(typ, nil) + if w.p.version >= iexportVersionGo1_18 { + w.int64(int64(v.Kind())) + } + + if v.Kind() == constant.Unknown { + // golang/go#60605: treat unknown constant values as if they have invalid type + // + // This loses some fidelity over the package type-checked from source, but that + // is acceptable. 
+ // + // TODO(rfindley): we should switch on the recorded constant kind rather + // than the constant type + return + } + + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { + case types.IsBoolean: + w.bool(constant.BoolVal(v)) + case types.IsInteger: + var i big.Int + if i64, exact := constant.Int64Val(v); exact { + i.SetInt64(i64) + } else if ui64, exact := constant.Uint64Val(v); exact { + i.SetUint64(ui64) + } else { + i.SetString(v.ExactString(), 10) + } + w.mpint(&i, typ) + case types.IsFloat: + f := constantToFloat(v) + w.mpfloat(f, typ) + case types.IsComplex: + w.mpfloat(constantToFloat(constant.Real(v)), typ) + w.mpfloat(constantToFloat(constant.Imag(v)), typ) + case types.IsString: + w.string(constant.StringVal(v)) + default: + if b.Kind() == types.Invalid { + // package contains type errors + break + } + panic(internalErrorf("unexpected type %v (%v)", typ, typ.Underlying())) + } +} + +// constantToFloat converts a constant.Value with kind constant.Float to a +// big.Float. +func constantToFloat(x constant.Value) *big.Float { + x = constant.ToFloat(x) + // Use the same floating-point precision (512) as cmd/compile + // (see Mpprec in cmd/compile/internal/gc/mpfloat.go). + const mpprec = 512 + var f big.Float + f.SetPrec(mpprec) + if v, exact := constant.Float64Val(x); exact { + // float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. + n := valueToRat(num) + d := valueToRat(denom) + f.SetRat(n.Quo(n, d)) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + _, ok := f.SetString(x.ExactString()) + assert(ok) + } + return &f +} + +func valueToRat(x constant.Value) *big.Rat { + // Convert little-endian to big-endian. + // I can't believe this is necessary. + bytes := constant.Bytes(x) + for i := 0; i < len(bytes)/2; i++ { + bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] + } + return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) +} + +// mpint exports a multi-precision integer. +// +// For unsigned types, small values are written out as a single +// byte. Larger values are written out as a length-prefixed big-endian +// byte string, where the length prefix is encoded as its complement. +// For example, bytes 0, 1, and 2 directly represent the integer +// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-, +// 2-, and 3-byte big-endian string follow. +// +// Encoding for signed types use the same general approach as for +// unsigned types, except small values use zig-zag encoding and the +// bottom bit of length prefix byte for large values is reserved as a +// sign bit. +// +// The exact boundary between small and large encodings varies +// according to the maximum number of bytes needed to encode a value +// of type typ. As a special case, 8-bit types are always encoded as a +// single byte. +// +// TODO(mdempsky): Is this level of complexity really worthwhile? 
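+//
+// As a concrete illustration of the rules above (an example, not an
+// additional rule): for an int64 constant the zig-zag small-value range
+// is [-120, 119], so 3 is written as the single byte 6 and -3 as the
+// byte 5, while -1000 is written as the prefix byte 253 (two bytes
+// follow, sign bit set) followed by the magnitude bytes 0x03 0xe8.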
+func (w *exportWriter) mpint(x *big.Int, typ types.Type) { + basic, ok := typ.Underlying().(*types.Basic) + if !ok { + panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying())) + } + + signed, maxBytes := intSize(basic) + + negative := x.Sign() < 0 + if !signed && negative { + panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x)) + } + + b := x.Bytes() + if len(b) > 0 && b[0] == 0 { + panic(internalErrorf("leading zeros")) + } + if uint(len(b)) > maxBytes { + panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)) + } + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + // Check if x can use small value encoding. + if len(b) <= 1 { + var ux uint + if len(b) == 1 { + ux = uint(b[0]) + } + if signed { + ux <<= 1 + if negative { + ux-- + } + } + if ux < maxSmall { + w.data.WriteByte(byte(ux)) + return + } + } + + n := 256 - uint(len(b)) + if signed { + n = 256 - 2*uint(len(b)) + if negative { + n |= 1 + } + } + if n < maxSmall || n >= 256 { + panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)) + } + + w.data.WriteByte(byte(n)) + w.data.Write(b) +} + +// mpfloat exports a multi-precision floating point number. +// +// The number's value is decomposed into mantissa × 2**exponent, where +// mantissa is an integer. The value is written out as mantissa (as a +// multi-precision integer) and then the exponent, except exponent is +// omitted if mantissa is zero. +func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) { + if f.IsInf() { + panic("infinite constant") + } + + // Break into f = mant × 2**exp, with 0.5 <= mant < 1. + var mant big.Float + exp := int64(f.MantExp(&mant)) + + // Scale so that mant is an integer. + prec := mant.MinPrec() + mant.SetMantExp(&mant, int(prec)) + exp -= int64(prec) + + manti, acc := mant.Int(nil) + if acc != big.Exact { + panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc)) + } + w.mpint(manti, typ) + if manti.Sign() != 0 { + w.int64(exp) + } +} + +func (w *exportWriter) bool(b bool) bool { + var x uint64 + if b { + x = 1 + } + w.uint64(x) + return b +} + +func (w *exportWriter) int64(x int64) { w.data.int64(x) } +func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) } +func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) } + +func (w *exportWriter) localIdent(obj types.Object) { + // Anonymous parameters. + if obj == nil { + w.string("") + return + } + + name := obj.Name() + if name == "_" { + w.string("_") + return + } + + w.string(name) +} + +type intWriter struct { + bytes.Buffer +} + +func (w *intWriter) int64(x int64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutVarint(buf[:], x) + w.Write(buf[:n]) +} + +func (w *intWriter) uint64(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + w.Write(buf[:n]) +} + +func assert(cond bool) { + if !cond { + panic("internal error: assertion failed") + } +} + +// The below is copied from go/src/cmd/compile/internal/gc/syntax.go. + +// objQueue is a FIFO queue of types.Object. The zero value of objQueue is +// a ready-to-use empty queue. +type objQueue struct { + ring []types.Object + head, tail int +} + +// empty returns true if q contains no Nodes. +func (q *objQueue) empty() bool { + return q.head == q.tail +} + +// pushTail appends n to the tail of the queue. 
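+// When the ring is full, it is grown by doubling its capacity and the
+// live elements are copied to the front of the new ring.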
+func (q *objQueue) pushTail(obj types.Object) { + if len(q.ring) == 0 { + q.ring = make([]types.Object, 16) + } else if q.head+len(q.ring) == q.tail { + // Grow the ring. + nring := make([]types.Object, len(q.ring)*2) + // Copy the old elements. + part := q.ring[q.head%len(q.ring):] + if q.tail-q.head <= len(part) { + part = part[:q.tail-q.head] + copy(nring, part) + } else { + pos := copy(nring, part) + copy(nring[pos:], q.ring[:q.tail%len(q.ring)]) + } + q.ring, q.head, q.tail = nring, 0, q.tail-q.head + } + + q.ring[q.tail%len(q.ring)] = obj + q.tail++ +} + +// popHead pops a node from the head of the queue. It panics if q is empty. +func (q *objQueue) popHead() types.Object { + if q.empty() { + panic("dequeue empty") + } + obj := q.ring[q.head%len(q.ring)] + q.head++ + return obj +} + +// internalError represents an error generated inside this package. +type internalError string + +func (e internalError) Error() string { return "gcimporter: " + string(e) } + +// TODO(adonovan): make this call panic, so that it's symmetric with errorf. +// Otherwise it's easy to forget to do anything with the error. +// +// TODO(adonovan): also, consider switching the names "errorf" and +// "internalErrorf" as the former is used for bugs, whose cause is +// internal inconsistency, whereas the latter is used for ordinary +// situations like bad input, whose cause is external. +func internalErrorf(format string, args ...interface{}) error { + return internalError(fmt.Sprintf(format, args...)) +} diff --git a/agent/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/agent/vendor/golang.org/x/tools/internal/gcimporter/iimport.go new file mode 100644 index 00000000000..8e64cf644fc --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -0,0 +1,1083 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed package import. +// See cmd/compile/internal/gc/iexport.go for the export data format. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "io" + "math/big" + "sort" + "strings" + + "golang.org/x/tools/go/types/objectpath" + "golang.org/x/tools/internal/typeparams" +) + +type intReader struct { + *bytes.Reader + path string +} + +func (r *intReader) int64() int64 { + i, err := binary.ReadVarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +func (r *intReader) uint64() uint64 { + i, err := binary.ReadUvarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +// Keep this in sync with constants in iexport.go. +const ( + iexportVersionGo1_11 = 0 + iexportVersionPosCol = 1 + iexportVersionGo1_18 = 2 + iexportVersionGenerics = 2 + + iexportVersionCurrent = 2 +) + +type ident struct { + pkg *types.Package + name string +} + +const predeclReserved = 32 + +type itag uint64 + +const ( + // Types + definedType itag = iota + pointerType + sliceType + arrayType + chanType + mapType + signatureType + structType + interfaceType + typeParamType + instanceType + unionType +) + +// IImportData imports a package from the serialized package data +// and returns 0 and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. 
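+//
+// A minimal usage sketch; fset, imports, exportData, and the import path
+// are caller-supplied values shown here only for illustration:
+//
+//	fset := token.NewFileSet()
+//	imports := make(map[string]*types.Package)
+//	_, pkg, err := IImportData(fset, imports, exportData, "example.com/m/p")
+//	if err != nil {
+//		// handle unrecognized or corrupt export data
+//	}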
+func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) { + pkgs, err := iimportCommon(fset, GetPackagesFromMap(imports), data, false, path, false, nil) + if err != nil { + return 0, nil, err + } + return 0, pkgs[0], nil +} + +// IImportBundle imports a set of packages from the serialized package bundle. +func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) { + return iimportCommon(fset, GetPackagesFromMap(imports), data, true, "", false, nil) +} + +// A GetPackagesFunc function obtains the non-nil symbols for a set of +// packages, creating and recursively importing them as needed. An +// implementation should store each package symbol is in the Pkg +// field of the items array. +// +// Any error causes importing to fail. This can be used to quickly read +// the import manifest of an export data file without fully decoding it. +type GetPackagesFunc = func(items []GetPackagesItem) error + +// A GetPackagesItem is a request from the importer for the package +// symbol of the specified name and path. +type GetPackagesItem struct { + Name, Path string + Pkg *types.Package // to be filled in by GetPackagesFunc call + + // private importer state + pathOffset uint64 + nameIndex map[string]uint64 +} + +// GetPackagesFromMap returns a GetPackagesFunc that retrieves +// packages from the given map of package path to package. +// +// The returned function may mutate m: each requested package that is not +// found is created with types.NewPackage and inserted into m. +func GetPackagesFromMap(m map[string]*types.Package) GetPackagesFunc { + return func(items []GetPackagesItem) error { + for i, item := range items { + pkg, ok := m[item.Path] + if !ok { + pkg = types.NewPackage(item.Path, item.Name) + m[item.Path] = pkg + } + items[i].Pkg = pkg + } + return nil + } +} + +func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, bundle bool, path string, shallow bool, reportf ReportFunc) (pkgs []*types.Package, err error) { + const currentVersion = iexportVersionCurrent + version := int64(-1) + if !debug { + defer func() { + if e := recover(); e != nil { + if bundle { + err = fmt.Errorf("%v", e) + } else if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("internal error while importing %q (%v); please report an issue", path, e) + } + } + }() + } + + r := &intReader{bytes.NewReader(data), path} + + if bundle { + if v := r.uint64(); v != bundleVersion { + errorf("unknown bundle format version %d", v) + } + } + + version = int64(r.uint64()) + switch version { + case iexportVersionGo1_18, iexportVersionPosCol, iexportVersionGo1_11: + default: + if version > iexportVersionGo1_18 { + errorf("unstable iexport format version %d, just rebuild compiler and std library", version) + } else { + errorf("unknown iexport format version %d", version) + } + } + + sLen := int64(r.uint64()) + var fLen int64 + var fileOffset []uint64 + if shallow { + // Shallow mode uses a different position encoding. 
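+		// The file section length and the per-file offsets are read here;
+		// positions are then decoded as a (file index, byte offset) pair
+		// rather than as line/column deltas (see posv2).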
+ fLen = int64(r.uint64()) + fileOffset = make([]uint64, r.uint64()) + for i := range fileOffset { + fileOffset[i] = r.uint64() + } + } + dLen := int64(r.uint64()) + + whence, _ := r.Seek(0, io.SeekCurrent) + stringData := data[whence : whence+sLen] + fileData := data[whence+sLen : whence+sLen+fLen] + declData := data[whence+sLen+fLen : whence+sLen+fLen+dLen] + r.Seek(sLen+fLen+dLen, io.SeekCurrent) + + p := iimporter{ + version: int(version), + ipath: path, + shallow: shallow, + reportf: reportf, + + stringData: stringData, + stringCache: make(map[uint64]string), + fileOffset: fileOffset, + fileData: fileData, + fileCache: make([]*token.File, len(fileOffset)), + pkgCache: make(map[uint64]*types.Package), + + declData: declData, + pkgIndex: make(map[*types.Package]map[string]uint64), + typCache: make(map[uint64]types.Type), + // Separate map for typeparams, keyed by their package and unique + // name. + tparamIndex: make(map[ident]types.Type), + + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*fileInfo), + }, + } + defer p.fake.setLines() // set lines for files in fset + + for i, pt := range predeclared() { + p.typCache[uint64(i)] = pt + } + + // Gather the relevant packages from the manifest. + items := make([]GetPackagesItem, r.uint64()) + for i := range items { + pkgPathOff := r.uint64() + pkgPath := p.stringAt(pkgPathOff) + pkgName := p.stringAt(r.uint64()) + _ = r.uint64() // package height; unused by go/types + + if pkgPath == "" { + pkgPath = path + } + items[i].Name = pkgName + items[i].Path = pkgPath + items[i].pathOffset = pkgPathOff + + // Read index for package. + nameIndex := make(map[string]uint64) + nSyms := r.uint64() + // In shallow mode, only the current package (i=0) has an index. + assert(!(shallow && i > 0 && nSyms != 0)) + for ; nSyms > 0; nSyms-- { + name := p.stringAt(r.uint64()) + nameIndex[name] = r.uint64() + } + + items[i].nameIndex = nameIndex + } + + // Request packages all at once from the client, + // enabling a parallel implementation. + if err := getPackages(items); err != nil { + return nil, err // don't wrap this error + } + + // Check the results and complete the index. + pkgList := make([]*types.Package, len(items)) + for i, item := range items { + pkg := item.Pkg + if pkg == nil { + errorf("internal error: getPackages returned nil package for %q", item.Path) + } else if pkg.Path() != item.Path { + errorf("internal error: getPackages returned wrong path %q, want %q", pkg.Path(), item.Path) + } else if pkg.Name() != item.Name { + errorf("internal error: getPackages returned wrong name %s for package %q, want %s", pkg.Name(), item.Path, item.Name) + } + p.pkgCache[item.pathOffset] = pkg + p.pkgIndex[pkg] = item.nameIndex + pkgList[i] = pkg + } + + if bundle { + pkgs = make([]*types.Package, r.uint64()) + for i := range pkgs { + pkg := p.pkgAt(r.uint64()) + imps := make([]*types.Package, r.uint64()) + for j := range imps { + imps[j] = p.pkgAt(r.uint64()) + } + pkg.SetImports(imps) + pkgs[i] = pkg + } + } else { + if len(pkgList) == 0 { + errorf("no packages found for %s", path) + panic("unreachable") + } + pkgs = pkgList[:1] + + // record all referenced packages as imports + list := append(([]*types.Package)(nil), pkgList[1:]...) 
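+		// byPath orders the recorded imports by package path.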
+ sort.Sort(byPath(list)) + pkgs[0].SetImports(list) + } + + for _, pkg := range pkgs { + if pkg.Complete() { + continue + } + + names := make([]string, 0, len(p.pkgIndex[pkg])) + for name := range p.pkgIndex[pkg] { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + p.doDecl(pkg, name) + } + + // package was imported completely and without errors + pkg.MarkComplete() + } + + // SetConstraint can't be called if the constraint type is not yet complete. + // When type params are created in the 'P' case of (*importReader).obj(), + // the associated constraint type may not be complete due to recursion. + // Therefore, we defer calling SetConstraint there, and call it here instead + // after all types are complete. + for _, d := range p.later { + typeparams.SetTypeParamConstraint(d.t, d.constraint) + } + + for _, typ := range p.interfaceList { + typ.Complete() + } + + // Workaround for golang/go#61561. See the doc for instanceList for details. + for _, typ := range p.instanceList { + if iface, _ := typ.Underlying().(*types.Interface); iface != nil { + iface.Complete() + } + } + + return pkgs, nil +} + +type setConstraintArgs struct { + t *typeparams.TypeParam + constraint types.Type +} + +type iimporter struct { + version int + ipath string + + shallow bool + reportf ReportFunc // if non-nil, used to report bugs + + stringData []byte + stringCache map[uint64]string + fileOffset []uint64 // fileOffset[i] is offset in fileData for info about file encoded as i + fileData []byte + fileCache []*token.File // memoized decoding of file encoded as i + pkgCache map[uint64]*types.Package + + declData []byte + pkgIndex map[*types.Package]map[string]uint64 + typCache map[uint64]types.Type + tparamIndex map[ident]types.Type + + fake fakeFileSet + interfaceList []*types.Interface + + // Workaround for the go/types bug golang/go#61561: instances produced during + // instantiation may contain incomplete interfaces. Here we only complete the + // underlying type of the instance, which is the most common case but doesn't + // handle parameterized interface literals defined deeper in the type. + instanceList []types.Type // instances for later completion (see golang/go#61561) + + // Arguments for calls to SetConstraint that are deferred due to recursive types + later []setConstraintArgs + + indent int // for tracing support +} + +func (p *iimporter) trace(format string, args ...interface{}) { + if !trace { + // Call sites should also be guarded, but having this check here allows + // easily enabling/disabling debug trace statements. + return + } + fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...) +} + +func (p *iimporter) doDecl(pkg *types.Package, name string) { + if debug { + p.trace("import decl %s", name) + p.indent++ + defer func() { + p.indent-- + p.trace("=> %s", name) + }() + } + // See if we've already imported this declaration. + if obj := pkg.Scope().Lookup(name); obj != nil { + return + } + + off, ok := p.pkgIndex[pkg][name] + if !ok { + // In deep mode, the index should be complete. In shallow + // mode, we should have already recursively loaded necessary + // dependencies so the above Lookup succeeds. 
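+		// Reaching this point therefore indicates either corrupt export
+		// data or an importer bug.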
+ errorf("%v.%v not in index", pkg, name) + } + + r := &importReader{p: p, currPkg: pkg} + r.declReader.Reset(p.declData[off:]) + + r.obj(name) +} + +func (p *iimporter) stringAt(off uint64) string { + if s, ok := p.stringCache[off]; ok { + return s + } + + slen, n := binary.Uvarint(p.stringData[off:]) + if n <= 0 { + errorf("varint failed") + } + spos := off + uint64(n) + s := string(p.stringData[spos : spos+slen]) + p.stringCache[off] = s + return s +} + +func (p *iimporter) fileAt(index uint64) *token.File { + file := p.fileCache[index] + if file == nil { + off := p.fileOffset[index] + file = p.decodeFile(intReader{bytes.NewReader(p.fileData[off:]), p.ipath}) + p.fileCache[index] = file + } + return file +} + +func (p *iimporter) decodeFile(rd intReader) *token.File { + filename := p.stringAt(rd.uint64()) + size := int(rd.uint64()) + file := p.fake.fset.AddFile(filename, -1, size) + + // SetLines requires a nondecreasing sequence. + // Because it is common for clients to derive the interval + // [start, start+len(name)] from a start position, and we + // want to ensure that the end offset is on the same line, + // we fill in the gaps of the sparse encoding with values + // that strictly increase by the largest possible amount. + // This allows us to avoid having to record the actual end + // offset of each needed line. + + lines := make([]int, int(rd.uint64())) + var index, offset int + for i, n := 0, int(rd.uint64()); i < n; i++ { + index += int(rd.uint64()) + offset += int(rd.uint64()) + lines[index] = offset + + // Ensure monotonicity between points. + for j := index - 1; j > 0 && lines[j] == 0; j-- { + lines[j] = lines[j+1] - 1 + } + } + + // Ensure monotonicity after last point. + for j := len(lines) - 1; j > 0 && lines[j] == 0; j-- { + size-- + lines[j] = size + } + + if !file.SetLines(lines) { + errorf("SetLines failed: %d", lines) // can't happen + } + return file +} + +func (p *iimporter) pkgAt(off uint64) *types.Package { + if pkg, ok := p.pkgCache[off]; ok { + return pkg + } + path := p.stringAt(off) + errorf("missing package %q in %q", path, p.ipath) + return nil +} + +func (p *iimporter) typAt(off uint64, base *types.Named) types.Type { + if t, ok := p.typCache[off]; ok && canReuse(base, t) { + return t + } + + if off < predeclReserved { + errorf("predeclared type missing from cache: %v", off) + } + + r := &importReader{p: p} + r.declReader.Reset(p.declData[off-predeclReserved:]) + t := r.doType(base) + + if canReuse(base, t) { + p.typCache[off] = t + } + return t +} + +// canReuse reports whether the type rhs on the RHS of the declaration for def +// may be re-used. +// +// Specifically, if def is non-nil and rhs is an interface type with methods, it +// may not be re-used because we have a convention of setting the receiver type +// for interface methods to def. +func canReuse(def *types.Named, rhs types.Type) bool { + if def == nil { + return true + } + iface, _ := rhs.(*types.Interface) + if iface == nil { + return true + } + // Don't use iface.Empty() here as iface may not be complete. 
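+	// NumEmbeddeds and NumExplicitMethods inspect only the declared
+	// embeddeds and methods, so they are safe to call on an incomplete
+	// interface.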
+ return iface.NumEmbeddeds() == 0 && iface.NumExplicitMethods() == 0 +} + +type importReader struct { + p *iimporter + declReader bytes.Reader + currPkg *types.Package + prevFile string + prevLine int64 + prevColumn int64 +} + +func (r *importReader) obj(name string) { + tag := r.byte() + pos := r.pos() + + switch tag { + case 'A': + typ := r.typ() + + r.declare(types.NewTypeName(pos, r.currPkg, name, typ)) + + case 'C': + typ, val := r.value() + + r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) + + case 'F', 'G': + var tparams []*typeparams.TypeParam + if tag == 'G' { + tparams = r.tparamList() + } + sig := r.signature(nil, nil, tparams) + r.declare(types.NewFunc(pos, r.currPkg, name, sig)) + + case 'T', 'U': + // Types can be recursive. We need to setup a stub + // declaration before recursing. + obj := types.NewTypeName(pos, r.currPkg, name, nil) + named := types.NewNamed(obj, nil, nil) + // Declare obj before calling r.tparamList, so the new type name is recognized + // if used in the constraint of one of its own typeparams (see #48280). + r.declare(obj) + if tag == 'U' { + tparams := r.tparamList() + typeparams.SetForNamed(named, tparams) + } + + underlying := r.p.typAt(r.uint64(), named).Underlying() + named.SetUnderlying(underlying) + + if !isInterface(underlying) { + for n := r.uint64(); n > 0; n-- { + mpos := r.pos() + mname := r.ident() + recv := r.param() + + // If the receiver has any targs, set those as the + // rparams of the method (since those are the + // typeparams being used in the method sig/body). + base := baseType(recv.Type()) + assert(base != nil) + targs := typeparams.NamedTypeArgs(base) + var rparams []*typeparams.TypeParam + if targs.Len() > 0 { + rparams = make([]*typeparams.TypeParam, targs.Len()) + for i := range rparams { + rparams[i] = targs.At(i).(*typeparams.TypeParam) + } + } + msig := r.signature(recv, rparams, nil) + + named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig)) + } + } + + case 'P': + // We need to "declare" a typeparam in order to have a name that + // can be referenced recursively (if needed) in the type param's + // bound. + if r.p.version < iexportVersionGenerics { + errorf("unexpected type param type") + } + name0 := tparamName(name) + tn := types.NewTypeName(pos, r.currPkg, name0, nil) + t := typeparams.NewTypeParam(tn, nil) + + // To handle recursive references to the typeparam within its + // bound, save the partial type in tparamIndex before reading the bounds. + id := ident{r.currPkg, name} + r.p.tparamIndex[id] = t + var implicit bool + if r.p.version >= iexportVersionGo1_18 { + implicit = r.bool() + } + constraint := r.typ() + if implicit { + iface, _ := constraint.(*types.Interface) + if iface == nil { + errorf("non-interface constraint marked implicit") + } + typeparams.MarkImplicit(iface) + } + // The constraint type may not be complete, if we + // are in the middle of a type recursion involving type + // constraints. So, we defer SetConstraint until we have + // completely set up all types in ImportData. + r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint}) + + case 'V': + typ := r.typ() + + r.declare(types.NewVar(pos, r.currPkg, name, typ)) + + default: + errorf("unexpected tag: %v", tag) + } +} + +func (r *importReader) declare(obj types.Object) { + obj.Pkg().Scope().Insert(obj) +} + +func (r *importReader) value() (typ types.Type, val constant.Value) { + typ = r.typ() + if r.p.version >= iexportVersionGo1_18 { + // TODO: add support for using the kind. 
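+		// For now the kind is read and discarded; the switch below decodes
+		// the value based on the constant's type instead.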
+ _ = constant.Kind(r.int64()) + } + + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { + case types.IsBoolean: + val = constant.MakeBool(r.bool()) + + case types.IsString: + val = constant.MakeString(r.string()) + + case types.IsInteger: + var x big.Int + r.mpint(&x, b) + val = constant.Make(&x) + + case types.IsFloat: + val = r.mpfloat(b) + + case types.IsComplex: + re := r.mpfloat(b) + im := r.mpfloat(b) + val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + + default: + if b.Kind() == types.Invalid { + val = constant.MakeUnknown() + return + } + errorf("unexpected type %v", typ) // panics + panic("unreachable") + } + + return +} + +func intSize(b *types.Basic) (signed bool, maxBytes uint) { + if (b.Info() & types.IsUntyped) != 0 { + return true, 64 + } + + switch b.Kind() { + case types.Float32, types.Complex64: + return true, 3 + case types.Float64, types.Complex128: + return true, 7 + } + + signed = (b.Info() & types.IsUnsigned) == 0 + switch b.Kind() { + case types.Int8, types.Uint8: + maxBytes = 1 + case types.Int16, types.Uint16: + maxBytes = 2 + case types.Int32, types.Uint32: + maxBytes = 4 + default: + maxBytes = 8 + } + + return +} + +func (r *importReader) mpint(x *big.Int, typ *types.Basic) { + signed, maxBytes := intSize(typ) + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + n, _ := r.declReader.ReadByte() + if uint(n) < maxSmall { + v := int64(n) + if signed { + v >>= 1 + if n&1 != 0 { + v = ^v + } + } + x.SetInt64(v) + return + } + + v := -n + if signed { + v = -(n &^ 1) >> 1 + } + if v < 1 || uint(v) > maxBytes { + errorf("weird decoding: %v, %v => %v", n, signed, v) + } + b := make([]byte, v) + io.ReadFull(&r.declReader, b) + x.SetBytes(b) + if signed && n&1 != 0 { + x.Neg(x) + } +} + +func (r *importReader) mpfloat(typ *types.Basic) constant.Value { + var mant big.Int + r.mpint(&mant, typ) + var f big.Float + f.SetInt(&mant) + if f.Sign() != 0 { + f.SetMantExp(&f, int(r.int64())) + } + return constant.Make(&f) +} + +func (r *importReader) ident() string { + return r.string() +} + +func (r *importReader) qualifiedIdent() (*types.Package, string) { + name := r.string() + pkg := r.pkg() + return pkg, name +} + +func (r *importReader) pos() token.Pos { + if r.p.shallow { + // precise offsets are encoded only in shallow mode + return r.posv2() + } + if r.p.version >= iexportVersionPosCol { + r.posv1() + } else { + r.posv0() + } + + if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 { + return token.NoPos + } + return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn)) +} + +func (r *importReader) posv0() { + delta := r.int64() + if delta != deltaNewFile { + r.prevLine += delta + } else if l := r.int64(); l == -1 { + r.prevLine += deltaNewFile + } else { + r.prevFile = r.string() + r.prevLine = l + } +} + +func (r *importReader) posv1() { + delta := r.int64() + r.prevColumn += delta >> 1 + if delta&1 != 0 { + delta = r.int64() + r.prevLine += delta >> 1 + if delta&1 != 0 { + r.prevFile = r.string() + } + } +} + +func (r *importReader) posv2() token.Pos { + file := r.uint64() + if file == 0 { + return token.NoPos + } + tf := r.p.fileAt(file - 1) + return tf.Pos(int(r.uint64())) +} + +func (r *importReader) typ() types.Type { + return r.p.typAt(r.uint64(), nil) +} + +func isInterface(t types.Type) bool { + _, ok := t.(*types.Interface) + return ok +} + +func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) } +func (r 
*importReader) string() string { return r.p.stringAt(r.uint64()) } + +func (r *importReader) doType(base *types.Named) (res types.Type) { + k := r.kind() + if debug { + r.p.trace("importing type %d (base: %s)", k, base) + r.p.indent++ + defer func() { + r.p.indent-- + r.p.trace("=> %s", res) + }() + } + switch k { + default: + errorf("unexpected kind tag in %q: %v", r.p.ipath, k) + return nil + + case definedType: + pkg, name := r.qualifiedIdent() + r.p.doDecl(pkg, name) + return pkg.Scope().Lookup(name).(*types.TypeName).Type() + case pointerType: + return types.NewPointer(r.typ()) + case sliceType: + return types.NewSlice(r.typ()) + case arrayType: + n := r.uint64() + return types.NewArray(r.typ(), int64(n)) + case chanType: + dir := chanDir(int(r.uint64())) + return types.NewChan(dir, r.typ()) + case mapType: + return types.NewMap(r.typ(), r.typ()) + case signatureType: + r.currPkg = r.pkg() + return r.signature(nil, nil, nil) + + case structType: + r.currPkg = r.pkg() + + fields := make([]*types.Var, r.uint64()) + tags := make([]string, len(fields)) + for i := range fields { + var field *types.Var + if r.p.shallow { + field, _ = r.objectPathObject().(*types.Var) + } + + fpos := r.pos() + fname := r.ident() + ftyp := r.typ() + emb := r.bool() + tag := r.string() + + // Either this is not a shallow import, the field is local, or the + // encoded objectPath failed to produce an object (a bug). + // + // Even in this last, buggy case, fall back on creating a new field. As + // discussed in iexport.go, this is not correct, but mostly works and is + // preferable to failing (for now at least). + if field == nil { + field = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + } + + fields[i] = field + tags[i] = tag + } + return types.NewStruct(fields, tags) + + case interfaceType: + r.currPkg = r.pkg() + + embeddeds := make([]types.Type, r.uint64()) + for i := range embeddeds { + _ = r.pos() + embeddeds[i] = r.typ() + } + + methods := make([]*types.Func, r.uint64()) + for i := range methods { + var method *types.Func + if r.p.shallow { + method, _ = r.objectPathObject().(*types.Func) + } + + mpos := r.pos() + mname := r.ident() + + // TODO(mdempsky): Matches bimport.go, but I + // don't agree with this. + var recv *types.Var + if base != nil { + recv = types.NewVar(token.NoPos, r.currPkg, "", base) + } + msig := r.signature(recv, nil, nil) + + if method == nil { + method = types.NewFunc(mpos, r.currPkg, mname, msig) + } + methods[i] = method + } + + typ := newInterface(methods, embeddeds) + r.p.interfaceList = append(r.p.interfaceList, typ) + return typ + + case typeParamType: + if r.p.version < iexportVersionGenerics { + errorf("unexpected type param type") + } + pkg, name := r.qualifiedIdent() + id := ident{pkg, name} + if t, ok := r.p.tparamIndex[id]; ok { + // We're already in the process of importing this typeparam. + return t + } + // Otherwise, import the definition of the typeparam now. + r.p.doDecl(pkg, name) + return r.p.tparamIndex[id] + + case instanceType: + if r.p.version < iexportVersionGenerics { + errorf("unexpected instantiation type") + } + // pos does not matter for instances: they are positioned on the original + // type. + _ = r.pos() + len := r.uint64() + targs := make([]types.Type, len) + for i := range targs { + targs[i] = r.typ() + } + baseType := r.typ() + // The imported instantiated type doesn't include any methods, so + // we must always use the methods of the base (orig) type. 
+ // TODO provide a non-nil *Environment + t, _ := typeparams.Instantiate(nil, baseType, targs, false) + + // Workaround for golang/go#61561. See the doc for instanceList for details. + r.p.instanceList = append(r.p.instanceList, t) + return t + + case unionType: + if r.p.version < iexportVersionGenerics { + errorf("unexpected instantiation type") + } + terms := make([]*typeparams.Term, r.uint64()) + for i := range terms { + terms[i] = typeparams.NewTerm(r.bool(), r.typ()) + } + return typeparams.NewUnion(terms) + } +} + +func (r *importReader) kind() itag { + return itag(r.uint64()) +} + +// objectPathObject is the inverse of exportWriter.objectPath. +// +// In shallow mode, certain fields and methods may need to be looked up in an +// imported package. See the doc for exportWriter.objectPath for a full +// explanation. +func (r *importReader) objectPathObject() types.Object { + objPath := objectpath.Path(r.string()) + if objPath == "" { + return nil + } + pkg := r.pkg() + obj, err := objectpath.Object(pkg, objPath) + if err != nil { + if r.p.reportf != nil { + r.p.reportf("failed to find object for objectPath %q: %v", objPath, err) + } + } + return obj +} + +func (r *importReader) signature(recv *types.Var, rparams []*typeparams.TypeParam, tparams []*typeparams.TypeParam) *types.Signature { + params := r.paramList() + results := r.paramList() + variadic := params.Len() > 0 && r.bool() + return typeparams.NewSignatureType(recv, rparams, tparams, params, results, variadic) +} + +func (r *importReader) tparamList() []*typeparams.TypeParam { + n := r.uint64() + if n == 0 { + return nil + } + xs := make([]*typeparams.TypeParam, n) + for i := range xs { + // Note: the standard library importer is tolerant of nil types here, + // though would panic in SetTypeParams. + xs[i] = r.typ().(*typeparams.TypeParam) + } + return xs +} + +func (r *importReader) paramList() *types.Tuple { + xs := make([]*types.Var, r.uint64()) + for i := range xs { + xs[i] = r.param() + } + return types.NewTuple(xs...) +} + +func (r *importReader) param() *types.Var { + pos := r.pos() + name := r.ident() + typ := r.typ() + return types.NewParam(pos, r.currPkg, name, typ) +} + +func (r *importReader) bool() bool { + return r.uint64() != 0 +} + +func (r *importReader) int64() int64 { + n, err := binary.ReadVarint(&r.declReader) + if err != nil { + errorf("readVarint: %v", err) + } + return n +} + +func (r *importReader) uint64() uint64 { + n, err := binary.ReadUvarint(&r.declReader) + if err != nil { + errorf("readUvarint: %v", err) + } + return n +} + +func (r *importReader) byte() byte { + x, err := r.declReader.ReadByte() + if err != nil { + errorf("declReader.ReadByte: %v", err) + } + return x +} + +func baseType(typ types.Type) *types.Named { + // pointer receivers are never types.Named types + if p, _ := typ.(*types.Pointer); p != nil { + typ = p.Elem() + } + // receiver base types are always (possibly generic) types.Named types + n, _ := typ.(*types.Named) + return n +} diff --git a/agent/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go b/agent/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go new file mode 100644 index 00000000000..8b163e3d058 --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go @@ -0,0 +1,22 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !go1.11 +// +build !go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + named := make([]*types.Named, len(embeddeds)) + for i, e := range embeddeds { + var ok bool + named[i], ok = e.(*types.Named) + if !ok { + panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") + } + } + return types.NewInterface(methods, named) +} diff --git a/agent/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go b/agent/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go new file mode 100644 index 00000000000..49984f40fd8 --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.11 +// +build go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + return types.NewInterfaceType(methods, embeddeds) +} diff --git a/agent/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go b/agent/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go new file mode 100644 index 00000000000..d892273efb6 --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go @@ -0,0 +1,16 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.18 +// +build !go1.18 + +package gcimporter + +import "go/types" + +const iexportVersion = iexportVersionGo1_11 + +func additionalPredeclared() []types.Type { + return nil +} diff --git a/agent/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/agent/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go new file mode 100644 index 00000000000..edbe6ea7041 --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go @@ -0,0 +1,37 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package gcimporter + +import "go/types" + +const iexportVersion = iexportVersionGenerics + +// additionalPredeclared returns additional predeclared types in go.1.18. +func additionalPredeclared() []types.Type { + return []types.Type{ + // comparable + types.Universe.Lookup("comparable").Type(), + + // any + types.Universe.Lookup("any").Type(), + } +} + +// See cmd/compile/internal/types.SplitVargenSuffix. +func splitVargenSuffix(name string) (base, suffix string) { + i := len(name) + for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { + i-- + } + const dot = "·" + if i >= len(dot) && name[i-len(dot):i] == dot { + i -= len(dot) + return name[:i], name[i:] + } + return name, "" +} diff --git a/agent/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/agent/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go new file mode 100644 index 00000000000..286bf445483 --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go @@ -0,0 +1,10 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !(go1.18 && goexperiment.unified) +// +build !go1.18 !goexperiment.unified + +package gcimporter + +const unifiedIR = false diff --git a/agent/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/agent/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go new file mode 100644 index 00000000000..b5d69ffbe68 --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go @@ -0,0 +1,10 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 && goexperiment.unified +// +build go1.18,goexperiment.unified + +package gcimporter + +const unifiedIR = true diff --git a/agent/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go b/agent/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go new file mode 100644 index 00000000000..8eb20729c2a --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go @@ -0,0 +1,19 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.18 +// +build !go1.18 + +package gcimporter + +import ( + "fmt" + "go/token" + "go/types" +) + +func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + err = fmt.Errorf("go/tools compiled with a Go version earlier than 1.18 cannot read unified IR export data") + return +} diff --git a/agent/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/agent/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go new file mode 100644 index 00000000000..b977435f626 --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -0,0 +1,728 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Derived from go/internal/gcimporter/ureader.go + +//go:build go1.18 +// +build go1.18 + +package gcimporter + +import ( + "fmt" + "go/token" + "go/types" + "sort" + "strings" + + "golang.org/x/tools/internal/pkgbits" +) + +// A pkgReader holds the shared state for reading a unified IR package +// description. +type pkgReader struct { + pkgbits.PkgDecoder + + fake fakeFileSet + + ctxt *types.Context + imports map[string]*types.Package // previously imported packages, indexed by path + + // lazily initialized arrays corresponding to the unified IR + // PosBase, Pkg, and Type sections, respectively. + posBases []string // position bases (i.e., file names) + pkgs []*types.Package + typs []types.Type + + // laterFns holds functions that need to be invoked at the end of + // import reading. + laterFns []func() + // laterFors is used in case of 'type A B' to ensure that B is processed before A. + laterFors map[types.Type]int + + // ifaces holds a list of constructed Interfaces, which need to have + // Complete called after importing is done. + ifaces []*types.Interface +} + +// later adds a function to be invoked at the end of import reading. +func (pr *pkgReader) later(fn func()) { + pr.laterFns = append(pr.laterFns, fn) +} + +// See cmd/compile/internal/noder.derivedInfo. +type derivedInfo struct { + idx pkgbits.Index + needed bool +} + +// See cmd/compile/internal/noder.typeInfo. 
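+//
+// A typeInfo locates a type either in the package's type section
+// (derived == false) or in the enclosing element's dictionary of derived
+// types (derived == true); see pkgReader.typIdx.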
+type typeInfo struct { + idx pkgbits.Index + derived bool +} + +func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + if !debug { + defer func() { + if x := recover(); x != nil { + err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x) + } + }() + } + + s := string(data) + s = s[:strings.LastIndex(s, "\n$$\n")] + input := pkgbits.NewPkgDecoder(path, s) + pkg = readUnifiedPackage(fset, nil, imports, input) + return +} + +// laterFor adds a function to be invoked at the end of import reading, and records the type that function is finishing. +func (pr *pkgReader) laterFor(t types.Type, fn func()) { + if pr.laterFors == nil { + pr.laterFors = make(map[types.Type]int) + } + pr.laterFors[t] = len(pr.laterFns) + pr.laterFns = append(pr.laterFns, fn) +} + +// readUnifiedPackage reads a package description from the given +// unified IR export data decoder. +func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package { + pr := pkgReader{ + PkgDecoder: input, + + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*fileInfo), + }, + + ctxt: ctxt, + imports: imports, + + posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)), + pkgs: make([]*types.Package, input.NumElems(pkgbits.RelocPkg)), + typs: make([]types.Type, input.NumElems(pkgbits.RelocType)), + } + defer pr.fake.setLines() + + r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) + pkg := r.pkg() + r.Bool() // has init + + for i, n := 0, r.Len(); i < n; i++ { + // As if r.obj(), but avoiding the Scope.Lookup call, + // to avoid eager loading of imports. + r.Sync(pkgbits.SyncObject) + assert(!r.Bool()) + r.p.objIdx(r.Reloc(pkgbits.RelocObj)) + assert(r.Len() == 0) + } + + r.Sync(pkgbits.SyncEOF) + + for _, fn := range pr.laterFns { + fn() + } + + for _, iface := range pr.ifaces { + iface.Complete() + } + + // Imports() of pkg are all of the transitive packages that were loaded. + var imps []*types.Package + for _, imp := range pr.pkgs { + if imp != nil && imp != pkg { + imps = append(imps, imp) + } + } + sort.Sort(byPath(imps)) + pkg.SetImports(imps) + + pkg.MarkComplete() + return pkg +} + +// A reader holds the state for reading a single unified IR element +// within a package. +type reader struct { + pkgbits.Decoder + + p *pkgReader + + dict *readerDict +} + +// A readerDict holds the state for type parameters that parameterize +// the current unified IR element. +type readerDict struct { + // bounds is a slice of typeInfos corresponding to the underlying + // bounds of the element's type parameters. + bounds []typeInfo + + // tparams is a slice of the constructed TypeParams for the element. + tparams []*types.TypeParam + + // devived is a slice of types derived from tparams, which may be + // instantiated while reading the current element. 
+ derived []derivedInfo + derivedTypes []types.Type // lazily instantiated from derived +} + +func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { + return &reader{ + Decoder: pr.NewDecoder(k, idx, marker), + p: pr, + } +} + +func (pr *pkgReader) tempReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { + return &reader{ + Decoder: pr.TempDecoder(k, idx, marker), + p: pr, + } +} + +func (pr *pkgReader) retireReader(r *reader) { + pr.RetireDecoder(&r.Decoder) +} + +// @@@ Positions + +func (r *reader) pos() token.Pos { + r.Sync(pkgbits.SyncPos) + if !r.Bool() { + return token.NoPos + } + + // TODO(mdempsky): Delta encoding. + posBase := r.posBase() + line := r.Uint() + col := r.Uint() + return r.p.fake.pos(posBase, int(line), int(col)) +} + +func (r *reader) posBase() string { + return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase)) +} + +func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string { + if b := pr.posBases[idx]; b != "" { + return b + } + + var filename string + { + r := pr.tempReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase) + + // Within types2, position bases have a lot more details (e.g., + // keeping track of where //line directives appeared exactly). + // + // For go/types, we just track the file name. + + filename = r.String() + + if r.Bool() { // file base + // Was: "b = token.NewTrimmedFileBase(filename, true)" + } else { // line base + pos := r.pos() + line := r.Uint() + col := r.Uint() + + // Was: "b = token.NewLineBase(pos, filename, true, line, col)" + _, _, _ = pos, line, col + } + pr.retireReader(r) + } + b := filename + pr.posBases[idx] = b + return b +} + +// @@@ Packages + +func (r *reader) pkg() *types.Package { + r.Sync(pkgbits.SyncPkg) + return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg)) +} + +func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { + // TODO(mdempsky): Consider using some non-nil pointer to indicate + // the universe scope, so we don't need to keep re-reading it. + if pkg := pr.pkgs[idx]; pkg != nil { + return pkg + } + + pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg() + pr.pkgs[idx] = pkg + return pkg +} + +func (r *reader) doPkg() *types.Package { + path := r.String() + switch path { + case "": + path = r.p.PkgPath() + case "builtin": + return nil // universe + case "unsafe": + return types.Unsafe + } + + if pkg := r.p.imports[path]; pkg != nil { + return pkg + } + + name := r.String() + + pkg := types.NewPackage(path, name) + r.p.imports[path] = pkg + + return pkg +} + +// @@@ Types + +func (r *reader) typ() types.Type { + return r.p.typIdx(r.typInfo(), r.dict) +} + +func (r *reader) typInfo() typeInfo { + r.Sync(pkgbits.SyncType) + if r.Bool() { + return typeInfo{idx: pkgbits.Index(r.Len()), derived: true} + } + return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false} +} + +func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type { + idx := info.idx + var where *types.Type + if info.derived { + where = &dict.derivedTypes[idx] + idx = dict.derived[idx].idx + } else { + where = &pr.typs[idx] + } + + if typ := *where; typ != nil { + return typ + } + + var typ types.Type + { + r := pr.tempReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx) + r.dict = dict + + typ = r.doTyp() + assert(typ != nil) + pr.retireReader(r) + } + // See comment in pkgReader.typIdx explaining how this happens. 
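+	// (r.doTyp above may have recursively re-entered typIdx for the same
+	// index and already stored a type in *where; reuse it so that the
+	// cached type keeps a single identity.)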
+ if prev := *where; prev != nil { + return prev + } + + *where = typ + return typ +} + +func (r *reader) doTyp() (res types.Type) { + switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag { + default: + errorf("unhandled type tag: %v", tag) + panic("unreachable") + + case pkgbits.TypeBasic: + return types.Typ[r.Len()] + + case pkgbits.TypeNamed: + obj, targs := r.obj() + name := obj.(*types.TypeName) + if len(targs) != 0 { + t, _ := types.Instantiate(r.p.ctxt, name.Type(), targs, false) + return t + } + return name.Type() + + case pkgbits.TypeTypeParam: + return r.dict.tparams[r.Len()] + + case pkgbits.TypeArray: + len := int64(r.Uint64()) + return types.NewArray(r.typ(), len) + case pkgbits.TypeChan: + dir := types.ChanDir(r.Len()) + return types.NewChan(dir, r.typ()) + case pkgbits.TypeMap: + return types.NewMap(r.typ(), r.typ()) + case pkgbits.TypePointer: + return types.NewPointer(r.typ()) + case pkgbits.TypeSignature: + return r.signature(nil, nil, nil) + case pkgbits.TypeSlice: + return types.NewSlice(r.typ()) + case pkgbits.TypeStruct: + return r.structType() + case pkgbits.TypeInterface: + return r.interfaceType() + case pkgbits.TypeUnion: + return r.unionType() + } +} + +func (r *reader) structType() *types.Struct { + fields := make([]*types.Var, r.Len()) + var tags []string + for i := range fields { + pos := r.pos() + pkg, name := r.selector() + ftyp := r.typ() + tag := r.String() + embedded := r.Bool() + + fields[i] = types.NewField(pos, pkg, name, ftyp, embedded) + if tag != "" { + for len(tags) < i { + tags = append(tags, "") + } + tags = append(tags, tag) + } + } + return types.NewStruct(fields, tags) +} + +func (r *reader) unionType() *types.Union { + terms := make([]*types.Term, r.Len()) + for i := range terms { + terms[i] = types.NewTerm(r.Bool(), r.typ()) + } + return types.NewUnion(terms) +} + +func (r *reader) interfaceType() *types.Interface { + methods := make([]*types.Func, r.Len()) + embeddeds := make([]types.Type, r.Len()) + implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool() + + for i := range methods { + pos := r.pos() + pkg, name := r.selector() + mtyp := r.signature(nil, nil, nil) + methods[i] = types.NewFunc(pos, pkg, name, mtyp) + } + + for i := range embeddeds { + embeddeds[i] = r.typ() + } + + iface := types.NewInterfaceType(methods, embeddeds) + if implicit { + iface.MarkImplicit() + } + + // We need to call iface.Complete(), but if there are any embedded + // defined types, then we may not have set their underlying + // interface type yet. So we need to defer calling Complete until + // after we've called SetUnderlying everywhere. + // + // TODO(mdempsky): After CL 424876 lands, it should be safe to call + // iface.Complete() immediately. + r.p.ifaces = append(r.p.ifaces, iface) + + return iface +} + +func (r *reader) signature(recv *types.Var, rtparams, tparams []*types.TypeParam) *types.Signature { + r.Sync(pkgbits.SyncSignature) + + params := r.params() + results := r.params() + variadic := r.Bool() + + return types.NewSignatureType(recv, rtparams, tparams, params, results, variadic) +} + +func (r *reader) params() *types.Tuple { + r.Sync(pkgbits.SyncParams) + + params := make([]*types.Var, r.Len()) + for i := range params { + params[i] = r.param() + } + + return types.NewTuple(params...) 
+} + +func (r *reader) param() *types.Var { + r.Sync(pkgbits.SyncParam) + + pos := r.pos() + pkg, name := r.localIdent() + typ := r.typ() + + return types.NewParam(pos, pkg, name, typ) +} + +// @@@ Objects + +func (r *reader) obj() (types.Object, []types.Type) { + r.Sync(pkgbits.SyncObject) + + assert(!r.Bool()) + + pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj)) + obj := pkgScope(pkg).Lookup(name) + + targs := make([]types.Type, r.Len()) + for i := range targs { + targs[i] = r.typ() + } + + return obj, targs +} + +func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { + + var objPkg *types.Package + var objName string + var tag pkgbits.CodeObj + { + rname := pr.tempReader(pkgbits.RelocName, idx, pkgbits.SyncObject1) + + objPkg, objName = rname.qualifiedIdent() + assert(objName != "") + + tag = pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj)) + pr.retireReader(rname) + } + + if tag == pkgbits.ObjStub { + assert(objPkg == nil || objPkg == types.Unsafe) + return objPkg, objName + } + + // Ignore local types promoted to global scope (#55110). + if _, suffix := splitVargenSuffix(objName); suffix != "" { + return objPkg, objName + } + + if objPkg.Scope().Lookup(objName) == nil { + dict := pr.objDictIdx(idx) + + r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1) + r.dict = dict + + declare := func(obj types.Object) { + objPkg.Scope().Insert(obj) + } + + switch tag { + default: + panic("weird") + + case pkgbits.ObjAlias: + pos := r.pos() + typ := r.typ() + declare(types.NewTypeName(pos, objPkg, objName, typ)) + + case pkgbits.ObjConst: + pos := r.pos() + typ := r.typ() + val := r.Value() + declare(types.NewConst(pos, objPkg, objName, typ, val)) + + case pkgbits.ObjFunc: + pos := r.pos() + tparams := r.typeParamNames() + sig := r.signature(nil, nil, tparams) + declare(types.NewFunc(pos, objPkg, objName, sig)) + + case pkgbits.ObjType: + pos := r.pos() + + obj := types.NewTypeName(pos, objPkg, objName, nil) + named := types.NewNamed(obj, nil, nil) + declare(obj) + + named.SetTypeParams(r.typeParamNames()) + + setUnderlying := func(underlying types.Type) { + // If the underlying type is an interface, we need to + // duplicate its methods so we can replace the receiver + // parameter's type (#49906). + if iface, ok := underlying.(*types.Interface); ok && iface.NumExplicitMethods() != 0 { + methods := make([]*types.Func, iface.NumExplicitMethods()) + for i := range methods { + fn := iface.ExplicitMethod(i) + sig := fn.Type().(*types.Signature) + + recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named) + methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic())) + } + + embeds := make([]types.Type, iface.NumEmbeddeds()) + for i := range embeds { + embeds[i] = iface.EmbeddedType(i) + } + + newIface := types.NewInterfaceType(methods, embeds) + r.p.ifaces = append(r.p.ifaces, newIface) + underlying = newIface + } + + named.SetUnderlying(underlying) + } + + // Since go.dev/cl/455279, we can assume rhs.Underlying() will + // always be non-nil. However, to temporarily support users of + // older snapshot releases, we continue to fallback to the old + // behavior for now. + // + // TODO(mdempsky): Remove fallback code and simplify after + // allowing time for snapshot users to upgrade. 
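+			// If the RHS's underlying type is not yet available, defer
+			// setUnderlying until the RHS's own deferred setup has run.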
+ rhs := r.typ() + if underlying := rhs.Underlying(); underlying != nil { + setUnderlying(underlying) + } else { + pk := r.p + pk.laterFor(named, func() { + // First be sure that the rhs is initialized, if it needs to be initialized. + delete(pk.laterFors, named) // prevent cycles + if i, ok := pk.laterFors[rhs]; ok { + f := pk.laterFns[i] + pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op + f() // initialize RHS + } + setUnderlying(rhs.Underlying()) + }) + } + + for i, n := 0, r.Len(); i < n; i++ { + named.AddMethod(r.method()) + } + + case pkgbits.ObjVar: + pos := r.pos() + typ := r.typ() + declare(types.NewVar(pos, objPkg, objName, typ)) + } + } + + return objPkg, objName +} + +func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { + + var dict readerDict + + { + r := pr.tempReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1) + if implicits := r.Len(); implicits != 0 { + errorf("unexpected object with %v implicit type parameter(s)", implicits) + } + + dict.bounds = make([]typeInfo, r.Len()) + for i := range dict.bounds { + dict.bounds[i] = r.typInfo() + } + + dict.derived = make([]derivedInfo, r.Len()) + dict.derivedTypes = make([]types.Type, len(dict.derived)) + for i := range dict.derived { + dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()} + } + + pr.retireReader(r) + } + // function references follow, but reader doesn't need those + + return &dict +} + +func (r *reader) typeParamNames() []*types.TypeParam { + r.Sync(pkgbits.SyncTypeParamNames) + + // Note: This code assumes it only processes objects without + // implement type parameters. This is currently fine, because + // reader is only used to read in exported declarations, which are + // always package scoped. + + if len(r.dict.bounds) == 0 { + return nil + } + + // Careful: Type parameter lists may have cycles. To allow for this, + // we construct the type parameter list in two passes: first we + // create all the TypeNames and TypeParams, then we construct and + // set the bound type. + + r.dict.tparams = make([]*types.TypeParam, len(r.dict.bounds)) + for i := range r.dict.bounds { + pos := r.pos() + pkg, name := r.localIdent() + + tname := types.NewTypeName(pos, pkg, name, nil) + r.dict.tparams[i] = types.NewTypeParam(tname, nil) + } + + typs := make([]types.Type, len(r.dict.bounds)) + for i, bound := range r.dict.bounds { + typs[i] = r.p.typIdx(bound, r.dict) + } + + // TODO(mdempsky): This is subtle, elaborate further. + // + // We have to save tparams outside of the closure, because + // typeParamNames() can be called multiple times with the same + // dictionary instance. + // + // Also, this needs to happen later to make sure SetUnderlying has + // been called. + // + // TODO(mdempsky): Is it safe to have a single "later" slice or do + // we need to have multiple passes? See comments on CL 386002 and + // go.dev/issue/52104. + tparams := r.dict.tparams + r.p.later(func() { + for i, typ := range typs { + tparams[i].SetConstraint(typ) + } + }) + + return r.dict.tparams +} + +func (r *reader) method() *types.Func { + r.Sync(pkgbits.SyncMethod) + pos := r.pos() + pkg, name := r.selector() + + rparams := r.typeParamNames() + sig := r.signature(r.param(), rparams, nil) + + _ = r.pos() // TODO(mdempsky): Remove; this is a hacker for linker.go. 
+ return types.NewFunc(pos, pkg, name, sig) +} + +func (r *reader) qualifiedIdent() (*types.Package, string) { return r.ident(pkgbits.SyncSym) } +func (r *reader) localIdent() (*types.Package, string) { return r.ident(pkgbits.SyncLocalIdent) } +func (r *reader) selector() (*types.Package, string) { return r.ident(pkgbits.SyncSelector) } + +func (r *reader) ident(marker pkgbits.SyncMarker) (*types.Package, string) { + r.Sync(marker) + return r.pkg(), r.String() +} + +// pkgScope returns pkg.Scope(). +// If pkg is nil, it returns types.Universe instead. +// +// TODO(mdempsky): Remove after x/tools can depend on Go 1.19. +func pkgScope(pkg *types.Package) *types.Scope { + if pkg != nil { + return pkg.Scope() + } + return types.Universe +} diff --git a/agent/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/agent/vendor/golang.org/x/tools/internal/packagesinternal/packages.go new file mode 100644 index 00000000000..d9950b1f0be --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -0,0 +1,30 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packagesinternal exposes internal-only fields from go/packages. +package packagesinternal + +import ( + "golang.org/x/tools/internal/gocommand" +) + +var GetForTest = func(p interface{}) string { return "" } +var GetDepsErrors = func(p interface{}) []*PackageError { return nil } + +type PackageError struct { + ImportStack []string // shortest path from package named on command line to this one + Pos string // position of error (if present, file:line:col) + Err string // the error itself +} + +var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil } + +var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {} + +var TypecheckCgo int +var DepsErrors int // must be set as a LoadMode to call GetDepsErrors +var ForTest int // must be set as a LoadMode to call GetForTest + +var SetModFlag = func(config interface{}, value string) {} +var SetModFile = func(config interface{}, value string) {} diff --git a/agent/vendor/golang.org/x/tools/internal/pkgbits/codes.go b/agent/vendor/golang.org/x/tools/internal/pkgbits/codes.go new file mode 100644 index 00000000000..f0cabde96eb --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/pkgbits/codes.go @@ -0,0 +1,77 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// A Code is an enum value that can be encoded into bitstreams. +// +// Code types are preferable for enum types, because they allow +// Decoder to detect desyncs. +type Code interface { + // Marker returns the SyncMarker for the Code's dynamic type. + Marker() SyncMarker + + // Value returns the Code's ordinal value. + Value() int +} + +// A CodeVal distinguishes among go/constant.Value encodings. +type CodeVal int + +func (c CodeVal) Marker() SyncMarker { return SyncVal } +func (c CodeVal) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + ValBool CodeVal = iota + ValString + ValInt64 + ValBigInt + ValBigRat + ValBigFloat +) + +// A CodeType distinguishes among go/types.Type encodings. 
+type CodeType int + +func (c CodeType) Marker() SyncMarker { return SyncType } +func (c CodeType) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + TypeBasic CodeType = iota + TypeNamed + TypePointer + TypeSlice + TypeArray + TypeChan + TypeMap + TypeSignature + TypeStruct + TypeInterface + TypeUnion + TypeTypeParam +) + +// A CodeObj distinguishes among go/types.Object encodings. +type CodeObj int + +func (c CodeObj) Marker() SyncMarker { return SyncCodeObj } +func (c CodeObj) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + ObjAlias CodeObj = iota + ObjConst + ObjType + ObjFunc + ObjVar + ObjStub +) diff --git a/agent/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/agent/vendor/golang.org/x/tools/internal/pkgbits/decoder.go new file mode 100644 index 00000000000..b92e8e6eb32 --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/pkgbits/decoder.go @@ -0,0 +1,517 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "encoding/binary" + "errors" + "fmt" + "go/constant" + "go/token" + "io" + "math/big" + "os" + "runtime" + "strings" +) + +// A PkgDecoder provides methods for decoding a package's Unified IR +// export data. +type PkgDecoder struct { + // version is the file format version. + version uint32 + + // sync indicates whether the file uses sync markers. + sync bool + + // pkgPath is the package path for the package to be decoded. + // + // TODO(mdempsky): Remove; unneeded since CL 391014. + pkgPath string + + // elemData is the full data payload of the encoded package. + // Elements are densely and contiguously packed together. + // + // The last 8 bytes of elemData are the package fingerprint. + elemData string + + // elemEnds stores the byte-offset end positions of element + // bitstreams within elemData. + // + // For example, element I's bitstream data starts at elemEnds[I-1] + // (or 0, if I==0) and ends at elemEnds[I]. + // + // Note: elemEnds is indexed by absolute indices, not + // section-relative indices. + elemEnds []uint32 + + // elemEndsEnds stores the index-offset end positions of relocation + // sections within elemEnds. + // + // For example, section K's end positions start at elemEndsEnds[K-1] + // (or 0, if K==0) and end at elemEndsEnds[K]. + elemEndsEnds [numRelocs]uint32 + + scratchRelocEnt []RelocEnt +} + +// PkgPath returns the package path for the package +// +// TODO(mdempsky): Remove; unneeded since CL 391014. +func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath } + +// SyncMarkers reports whether pr uses sync markers. +func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } + +// NewPkgDecoder returns a PkgDecoder initialized to read the Unified +// IR export data from input. pkgPath is the package path for the +// compilation unit that produced the export data. +// +// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014. +func NewPkgDecoder(pkgPath, input string) PkgDecoder { + pr := PkgDecoder{ + pkgPath: pkgPath, + } + + // TODO(mdempsky): Implement direct indexing of input string to + // avoid copying the position information. 
+ + r := strings.NewReader(input) + + assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil) + + switch pr.version { + default: + panic(fmt.Errorf("unsupported version: %v", pr.version)) + case 0: + // no flags + case 1: + var flags uint32 + assert(binary.Read(r, binary.LittleEndian, &flags) == nil) + pr.sync = flags&flagSyncMarkers != 0 + } + + assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil) + + pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1]) + assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil) + + pos, err := r.Seek(0, io.SeekCurrent) + assert(err == nil) + + pr.elemData = input[pos:] + assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1])) + + return pr +} + +// NumElems returns the number of elements in section k. +func (pr *PkgDecoder) NumElems(k RelocKind) int { + count := int(pr.elemEndsEnds[k]) + if k > 0 { + count -= int(pr.elemEndsEnds[k-1]) + } + return count +} + +// TotalElems returns the total number of elements across all sections. +func (pr *PkgDecoder) TotalElems() int { + return len(pr.elemEnds) +} + +// Fingerprint returns the package fingerprint. +func (pr *PkgDecoder) Fingerprint() [8]byte { + var fp [8]byte + copy(fp[:], pr.elemData[len(pr.elemData)-8:]) + return fp +} + +// AbsIdx returns the absolute index for the given (section, index) +// pair. +func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int { + absIdx := int(idx) + if k > 0 { + absIdx += int(pr.elemEndsEnds[k-1]) + } + if absIdx >= int(pr.elemEndsEnds[k]) { + errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) + } + return absIdx +} + +// DataIdx returns the raw element bitstream for the given (section, +// index) pair. +func (pr *PkgDecoder) DataIdx(k RelocKind, idx Index) string { + absIdx := pr.AbsIdx(k, idx) + + var start uint32 + if absIdx > 0 { + start = pr.elemEnds[absIdx-1] + } + end := pr.elemEnds[absIdx] + + return pr.elemData[start:end] +} + +// StringIdx returns the string value for the given string index. +func (pr *PkgDecoder) StringIdx(idx Index) string { + return pr.DataIdx(RelocString, idx) +} + +// NewDecoder returns a Decoder for the given (section, index) pair, +// and decodes the given SyncMarker from the element bitstream. +func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { + r := pr.NewDecoderRaw(k, idx) + r.Sync(marker) + return r +} + +// TempDecoder returns a Decoder for the given (section, index) pair, +// and decodes the given SyncMarker from the element bitstream. +// If possible the Decoder should be RetireDecoder'd when it is no longer +// needed, this will avoid heap allocations. +func (pr *PkgDecoder) TempDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { + r := pr.TempDecoderRaw(k, idx) + r.Sync(marker) + return r +} + +func (pr *PkgDecoder) RetireDecoder(d *Decoder) { + pr.scratchRelocEnt = d.Relocs + d.Relocs = nil +} + +// NewDecoderRaw returns a Decoder for the given (section, index) pair. +// +// Most callers should use NewDecoder instead. +func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder { + r := Decoder{ + common: pr, + k: k, + Idx: idx, + } + + // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved. 
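+ // Each element's bitstream begins with its relocation table: a SyncRelocs + // marker, a length, and a SyncReloc-prefixed (kind, index) pair per entry; + // the element payload proper follows. TempDecoderRaw below reads the same prefix.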
+ r.Data = *strings.NewReader(pr.DataIdx(k, idx)) + + r.Sync(SyncRelocs) + r.Relocs = make([]RelocEnt, r.Len()) + for i := range r.Relocs { + r.Sync(SyncReloc) + r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())} + } + + return r +} + +func (pr *PkgDecoder) TempDecoderRaw(k RelocKind, idx Index) Decoder { + r := Decoder{ + common: pr, + k: k, + Idx: idx, + } + + r.Data.Reset(pr.DataIdx(k, idx)) + r.Sync(SyncRelocs) + l := r.Len() + if cap(pr.scratchRelocEnt) >= l { + r.Relocs = pr.scratchRelocEnt[:l] + pr.scratchRelocEnt = nil + } else { + r.Relocs = make([]RelocEnt, l) + } + for i := range r.Relocs { + r.Sync(SyncReloc) + r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())} + } + + return r +} + +// A Decoder provides methods for decoding an individual element's +// bitstream data. +type Decoder struct { + common *PkgDecoder + + Relocs []RelocEnt + Data strings.Reader + + k RelocKind + Idx Index +} + +func (r *Decoder) checkErr(err error) { + if err != nil { + errorf("unexpected decoding error: %w", err) + } +} + +func (r *Decoder) rawUvarint() uint64 { + x, err := readUvarint(&r.Data) + r.checkErr(err) + return x +} + +// readUvarint is a type-specialized copy of encoding/binary.ReadUvarint. +// This avoids the interface conversion and thus has better escape properties, +// which flows up the stack. +func readUvarint(r *strings.Reader) (uint64, error) { + var x uint64 + var s uint + for i := 0; i < binary.MaxVarintLen64; i++ { + b, err := r.ReadByte() + if err != nil { + if i > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return x, err + } + if b < 0x80 { + if i == binary.MaxVarintLen64-1 && b > 1 { + return x, overflow + } + return x | uint64(b)<<s, nil + } + x |= uint64(b&0x7f) << s + s += 7 + } + return x, overflow +} + +var overflow = errors.New("pkgbits: readUvarint overflows a 64-bit integer") + +func (r *Decoder) rawVarint() int64 { + ux := r.rawUvarint() + + // Zig-zag decode. + x := int64(ux >> 1) + if ux&1 != 0 { + x = ^x + } + return x +} + +func (r *Decoder) rawReloc(k RelocKind, idx int) Index { + e := r.Relocs[idx] + assert(e.Kind == k) + return e.Idx +} + +// Sync decodes a sync marker from the element bitstream and asserts +// that it matches the expected marker. +// +// If r.common.sync is false, then Sync is a no-op. +func (r *Decoder) Sync(mWant SyncMarker) { + if !r.common.sync { + return + } + + pos, _ := r.Data.Seek(0, io.SeekCurrent) + mHave := SyncMarker(r.rawUvarint()) + writerPCs := make([]int, r.rawUvarint()) + for i := range writerPCs { + writerPCs[i] = int(r.rawUvarint()) + } + + if mHave == mWant { + return + } + + // There's some tension here between printing: + // + // (1) full file paths that tools can recognize (e.g., so emacs + // hyperlinks the "file:line" text for easy navigation), or + // + // (2) short file paths that are easier for humans to read (e.g., by + // omitting redundant or irrelevant details, so it's easier to + // focus on the useful bits that remain). + // + // The current formatting favors the former, as it seems more + // helpful in practice. But perhaps the formatting could be improved + // to better address both concerns. For example, use relative file + // paths if they would be shorter, or rewrite file paths to contain + // "$GOROOT" (like objabi.AbsFile does) if tools can be taught how + // to reliably expand that again. 
+ + fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos) + + fmt.Printf("\nfound %v, written at:\n", mHave) + if len(writerPCs) == 0 { + fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath) + } + for _, pc := range writerPCs { + fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc))) + } + + fmt.Printf("\nexpected %v, reading at:\n", mWant) + var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size? + n := runtime.Callers(2, readerPCs[:]) + for _, pc := range fmtFrames(readerPCs[:n]...) { + fmt.Printf("\t%s\n", pc) + } + + // We already printed a stack trace for the reader, so now we can + // simply exit. Printing a second one with panic or base.Fatalf + // would just be noise. + os.Exit(1) +} + +// Bool decodes and returns a bool value from the element bitstream. +func (r *Decoder) Bool() bool { + r.Sync(SyncBool) + x, err := r.Data.ReadByte() + r.checkErr(err) + assert(x < 2) + return x != 0 +} + +// Int64 decodes and returns an int64 value from the element bitstream. +func (r *Decoder) Int64() int64 { + r.Sync(SyncInt64) + return r.rawVarint() +} + +// Uint64 decodes and returns a uint64 value from the element bitstream. +func (r *Decoder) Uint64() uint64 { + r.Sync(SyncUint64) + return r.rawUvarint() +} + +// Len decodes and returns a non-negative int value from the element bitstream. +func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v } + +// Int decodes and returns an int value from the element bitstream. +func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v } + +// Uint decodes and returns a uint value from the element bitstream. +func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v } + +// Code decodes a Code value from the element bitstream and returns +// its ordinal value. It's the caller's responsibility to convert the +// result to an appropriate Code type. +// +// TODO(mdempsky): Ideally this method would have signature "Code[T +// Code] T" instead, but we don't allow generic methods and the +// compiler can't depend on generics yet anyway. +func (r *Decoder) Code(mark SyncMarker) int { + r.Sync(mark) + return r.Len() +} + +// Reloc decodes a relocation of expected section k from the element +// bitstream and returns an index to the referenced element. +func (r *Decoder) Reloc(k RelocKind) Index { + r.Sync(SyncUseReloc) + return r.rawReloc(k, r.Len()) +} + +// String decodes and returns a string value from the element +// bitstream. +func (r *Decoder) String() string { + r.Sync(SyncString) + return r.common.StringIdx(r.Reloc(RelocString)) +} + +// Strings decodes and returns a variable-length slice of strings from +// the element bitstream. +func (r *Decoder) Strings() []string { + res := make([]string, r.Len()) + for i := range res { + res[i] = r.String() + } + return res +} + +// Value decodes and returns a constant.Value from the element +// bitstream. 
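+// The layout mirrors Encoder.Value: a Bool reporting whether the value is +// complex, a scalar for the real (or only) part, and, if complex, a second +// scalar for the imaginary part.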
+func (r *Decoder) Value() constant.Value { + r.Sync(SyncValue) + isComplex := r.Bool() + val := r.scalar() + if isComplex { + val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar())) + } + return val +} + +func (r *Decoder) scalar() constant.Value { + switch tag := CodeVal(r.Code(SyncVal)); tag { + default: + panic(fmt.Errorf("unexpected scalar tag: %v", tag)) + + case ValBool: + return constant.MakeBool(r.Bool()) + case ValString: + return constant.MakeString(r.String()) + case ValInt64: + return constant.MakeInt64(r.Int64()) + case ValBigInt: + return constant.Make(r.bigInt()) + case ValBigRat: + num := r.bigInt() + denom := r.bigInt() + return constant.Make(new(big.Rat).SetFrac(num, denom)) + case ValBigFloat: + return constant.Make(r.bigFloat()) + } +} + +func (r *Decoder) bigInt() *big.Int { + v := new(big.Int).SetBytes([]byte(r.String())) + if r.Bool() { + v.Neg(v) + } + return v +} + +func (r *Decoder) bigFloat() *big.Float { + v := new(big.Float).SetPrec(512) + assert(v.UnmarshalText([]byte(r.String())) == nil) + return v +} + +// @@@ Helpers + +// TODO(mdempsky): These should probably be removed. I think they're a +// smell that the export data format is not yet quite right. + +// PeekPkgPath returns the package path for the specified package +// index. +func (pr *PkgDecoder) PeekPkgPath(idx Index) string { + var path string + { + r := pr.TempDecoder(RelocPkg, idx, SyncPkgDef) + path = r.String() + pr.RetireDecoder(&r) + } + if path == "" { + path = pr.pkgPath + } + return path +} + +// PeekObj returns the package path, object name, and CodeObj for the +// specified object index. +func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) { + var ridx Index + var name string + var rcode int + { + r := pr.TempDecoder(RelocName, idx, SyncObject1) + r.Sync(SyncSym) + r.Sync(SyncPkg) + ridx = r.Reloc(RelocPkg) + name = r.String() + rcode = r.Code(SyncCodeObj) + pr.RetireDecoder(&r) + } + + path := pr.PeekPkgPath(ridx) + assert(name != "") + + tag := CodeObj(rcode) + + return path, name, tag +} diff --git a/agent/vendor/golang.org/x/tools/internal/pkgbits/doc.go b/agent/vendor/golang.org/x/tools/internal/pkgbits/doc.go new file mode 100644 index 00000000000..c8a2796b5e4 --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/pkgbits/doc.go @@ -0,0 +1,32 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkgbits implements low-level coding abstractions for +// Unified IR's export data format. +// +// At a low-level, a package is a collection of bitstream elements. +// Each element has a "kind" and a dense, non-negative index. +// Elements can be randomly accessed given their kind and index. +// +// Individual elements are sequences of variable-length values (e.g., +// integers, booleans, strings, go/constant values, cross-references +// to other elements). Package pkgbits provides APIs for encoding and +// decoding these low-level values, but the details of mapping +// higher-level Go constructs into elements is left to higher-level +// abstractions. +// +// Elements may cross-reference each other with "relocations." For +// example, an element representing a pointer type has a relocation +// referring to the element type. +// +// Go constructs may be composed as a constellation of multiple +// elements. 
For example, a declared function may have one element to +// describe the object (e.g., its name, type, position), and a +// separate element to describe its function body. This allows readers +// some flexibility in efficiently seeking or re-reading data (e.g., +// inlining requires re-reading the function body for each inlined +// call, without needing to re-read the object-level details). +// +// This is a copy of internal/pkgbits in the Go implementation. +package pkgbits diff --git a/agent/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/agent/vendor/golang.org/x/tools/internal/pkgbits/encoder.go new file mode 100644 index 00000000000..6482617a4fc --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/pkgbits/encoder.go @@ -0,0 +1,383 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "bytes" + "crypto/md5" + "encoding/binary" + "go/constant" + "io" + "math/big" + "runtime" +) + +// currentVersion is the current version number. +// +// - v0: initial prototype +// +// - v1: adds the flags uint32 word +const currentVersion uint32 = 1 + +// A PkgEncoder provides methods for encoding a package's Unified IR +// export data. +type PkgEncoder struct { + // elems holds the bitstream for previously encoded elements. + elems [numRelocs][]string + + // stringsIdx maps previously encoded strings to their index within + // the RelocString section, to allow deduplication. That is, + // elems[RelocString][stringsIdx[s]] == s (if present). + stringsIdx map[string]Index + + // syncFrames is the number of frames to write at each sync + // marker. A negative value means sync markers are omitted. + syncFrames int +} + +// SyncMarkers reports whether pw uses sync markers. +func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 } + +// NewPkgEncoder returns an initialized PkgEncoder. +// +// syncFrames is the number of caller frames that should be serialized +// at Sync points. Serializing additional frames results in larger +// export data files, but can help diagnosing desync errors in +// higher-level Unified IR reader/writer code. If syncFrames is +// negative, then sync markers are omitted entirely. +func NewPkgEncoder(syncFrames int) PkgEncoder { + return PkgEncoder{ + stringsIdx: make(map[string]Index), + syncFrames: syncFrames, + } +} + +// DumpTo writes the package's encoded data to out0 and returns the +// package fingerprint. +func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) { + h := md5.New() + out := io.MultiWriter(out0, h) + + writeUint32 := func(x uint32) { + assert(binary.Write(out, binary.LittleEndian, x) == nil) + } + + writeUint32(currentVersion) + + var flags uint32 + if pw.SyncMarkers() { + flags |= flagSyncMarkers + } + writeUint32(flags) + + // Write elemEndsEnds. + var sum uint32 + for _, elems := range &pw.elems { + sum += uint32(len(elems)) + writeUint32(sum) + } + + // Write elemEnds. + sum = 0 + for _, elems := range &pw.elems { + for _, elem := range elems { + sum += uint32(len(elem)) + writeUint32(sum) + } + } + + // Write elemData. + for _, elems := range &pw.elems { + for _, elem := range elems { + _, err := io.WriteString(out, elem) + assert(err == nil) + } + } + + // Write fingerprint. 
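+ // At this point the stream holds, in order: version, flags, elemEndsEnds, + // elemEnds, and elemData. The 8-byte fingerprint (a prefix of the MD5 of + // everything written so far) goes last, matching the layout NewPkgDecoder reads back.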
+ copy(fingerprint[:], h.Sum(nil)) + _, err := out0.Write(fingerprint[:]) + assert(err == nil) + + return +} + +// StringIdx adds a string value to the strings section, if not +// already present, and returns its index. +func (pw *PkgEncoder) StringIdx(s string) Index { + if idx, ok := pw.stringsIdx[s]; ok { + assert(pw.elems[RelocString][idx] == s) + return idx + } + + idx := Index(len(pw.elems[RelocString])) + pw.elems[RelocString] = append(pw.elems[RelocString], s) + pw.stringsIdx[s] = idx + return idx +} + +// NewEncoder returns an Encoder for a new element within the given +// section, and encodes the given SyncMarker as the start of the +// element bitstream. +func (pw *PkgEncoder) NewEncoder(k RelocKind, marker SyncMarker) Encoder { + e := pw.NewEncoderRaw(k) + e.Sync(marker) + return e +} + +// NewEncoderRaw returns an Encoder for a new element within the given +// section. +// +// Most callers should use NewEncoder instead. +func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder { + idx := Index(len(pw.elems[k])) + pw.elems[k] = append(pw.elems[k], "") // placeholder + + return Encoder{ + p: pw, + k: k, + Idx: idx, + } +} + +// An Encoder provides methods for encoding an individual element's +// bitstream data. +type Encoder struct { + p *PkgEncoder + + Relocs []RelocEnt + RelocMap map[RelocEnt]uint32 + Data bytes.Buffer // accumulated element bitstream data + + encodingRelocHeader bool + + k RelocKind + Idx Index // index within relocation section +} + +// Flush finalizes the element's bitstream and returns its Index. +func (w *Encoder) Flush() Index { + var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved + + // Backup the data so we write the relocations at the front. + var tmp bytes.Buffer + io.Copy(&tmp, &w.Data) + + // TODO(mdempsky): Consider writing these out separately so they're + // easier to strip, along with function bodies, so that we can prune + // down to just the data that's relevant to go/types. + if w.encodingRelocHeader { + panic("encodingRelocHeader already true; recursive flush?") + } + w.encodingRelocHeader = true + w.Sync(SyncRelocs) + w.Len(len(w.Relocs)) + for _, rEnt := range w.Relocs { + w.Sync(SyncReloc) + w.Len(int(rEnt.Kind)) + w.Len(int(rEnt.Idx)) + } + + io.Copy(&sb, &w.Data) + io.Copy(&sb, &tmp) + w.p.elems[w.k][w.Idx] = sb.String() + + return w.Idx +} + +func (w *Encoder) checkErr(err error) { + if err != nil { + errorf("unexpected encoding error: %v", err) + } +} + +func (w *Encoder) rawUvarint(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + _, err := w.Data.Write(buf[:n]) + w.checkErr(err) +} + +func (w *Encoder) rawVarint(x int64) { + // Zig-zag encode. + ux := uint64(x) << 1 + if x < 0 { + ux = ^ux + } + + w.rawUvarint(ux) +} + +func (w *Encoder) rawReloc(r RelocKind, idx Index) int { + e := RelocEnt{r, idx} + if w.RelocMap != nil { + if i, ok := w.RelocMap[e]; ok { + return int(i) + } + } else { + w.RelocMap = make(map[RelocEnt]uint32) + } + + i := len(w.Relocs) + w.RelocMap[e] = uint32(i) + w.Relocs = append(w.Relocs, e) + return i +} + +func (w *Encoder) Sync(m SyncMarker) { + if !w.p.SyncMarkers() { + return + } + + // Writing out stack frame string references requires working + // relocations, but writing out the relocations themselves involves + // sync markers. To prevent infinite recursion, we simply trim the + // stack frame for sync markers within the relocation header. 
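+ // + // The marker itself is encoded as a uvarint marker value, a uvarint frame + // count, and one uvarint per frame holding a RelocString relocation index + // for the formatted frame string; Decoder.Sync consumes the same sequence.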
+ var frames []string + if !w.encodingRelocHeader && w.p.syncFrames > 0 { + pcs := make([]uintptr, w.p.syncFrames) + n := runtime.Callers(2, pcs) + frames = fmtFrames(pcs[:n]...) + } + + // TODO(mdempsky): Save space by writing out stack frames as a + // linked list so we can share common stack frames. + w.rawUvarint(uint64(m)) + w.rawUvarint(uint64(len(frames))) + for _, frame := range frames { + w.rawUvarint(uint64(w.rawReloc(RelocString, w.p.StringIdx(frame)))) + } +} + +// Bool encodes and writes a bool value into the element bitstream, +// and then returns the bool value. +// +// For simple, 2-alternative encodings, the idiomatic way to call Bool +// is something like: +// +// if w.Bool(x != 0) { +// // alternative #1 +// } else { +// // alternative #2 +// } +// +// For multi-alternative encodings, use Code instead. +func (w *Encoder) Bool(b bool) bool { + w.Sync(SyncBool) + var x byte + if b { + x = 1 + } + err := w.Data.WriteByte(x) + w.checkErr(err) + return b +} + +// Int64 encodes and writes an int64 value into the element bitstream. +func (w *Encoder) Int64(x int64) { + w.Sync(SyncInt64) + w.rawVarint(x) +} + +// Uint64 encodes and writes a uint64 value into the element bitstream. +func (w *Encoder) Uint64(x uint64) { + w.Sync(SyncUint64) + w.rawUvarint(x) +} + +// Len encodes and writes a non-negative int value into the element bitstream. +func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) } + +// Int encodes and writes an int value into the element bitstream. +func (w *Encoder) Int(x int) { w.Int64(int64(x)) } + +// Uint encodes and writes a uint value into the element bitstream. +func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) } + +// Reloc encodes and writes a relocation for the given (section, +// index) pair into the element bitstream. +// +// Note: Only the index is formally written into the element +// bitstream, so bitstream decoders must know from context which +// section an encoded relocation refers to. +func (w *Encoder) Reloc(r RelocKind, idx Index) { + w.Sync(SyncUseReloc) + w.Len(w.rawReloc(r, idx)) +} + +// Code encodes and writes a Code value into the element bitstream. +func (w *Encoder) Code(c Code) { + w.Sync(c.Marker()) + w.Len(c.Value()) +} + +// String encodes and writes a string value into the element +// bitstream. +// +// Internally, strings are deduplicated by adding them to the strings +// section (if not already present), and then writing a relocation +// into the element bitstream. +func (w *Encoder) String(s string) { + w.Sync(SyncString) + w.Reloc(RelocString, w.p.StringIdx(s)) +} + +// Strings encodes and writes a variable-length slice of strings into +// the element bitstream. +func (w *Encoder) Strings(ss []string) { + w.Len(len(ss)) + for _, s := range ss { + w.String(s) + } +} + +// Value encodes and writes a constant.Value into the element +// bitstream. 
+func (w *Encoder) Value(val constant.Value) { + w.Sync(SyncValue) + if w.Bool(val.Kind() == constant.Complex) { + w.scalar(constant.Real(val)) + w.scalar(constant.Imag(val)) + } else { + w.scalar(val) + } +} + +func (w *Encoder) scalar(val constant.Value) { + switch v := constant.Val(val).(type) { + default: + errorf("unhandled %v (%v)", val, val.Kind()) + case bool: + w.Code(ValBool) + w.Bool(v) + case string: + w.Code(ValString) + w.String(v) + case int64: + w.Code(ValInt64) + w.Int64(v) + case *big.Int: + w.Code(ValBigInt) + w.bigInt(v) + case *big.Rat: + w.Code(ValBigRat) + w.bigInt(v.Num()) + w.bigInt(v.Denom()) + case *big.Float: + w.Code(ValBigFloat) + w.bigFloat(v) + } +} + +func (w *Encoder) bigInt(v *big.Int) { + b := v.Bytes() + w.String(string(b)) // TODO: More efficient encoding. + w.Bool(v.Sign() < 0) +} + +func (w *Encoder) bigFloat(v *big.Float) { + b := v.Append(nil, 'p', -1) + w.String(string(b)) // TODO: More efficient encoding. +} diff --git a/agent/vendor/golang.org/x/tools/internal/pkgbits/flags.go b/agent/vendor/golang.org/x/tools/internal/pkgbits/flags.go new file mode 100644 index 00000000000..654222745fa --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/pkgbits/flags.go @@ -0,0 +1,9 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +const ( + flagSyncMarkers = 1 << iota // file format contains sync markers +) diff --git a/agent/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go b/agent/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go new file mode 100644 index 00000000000..5294f6a63ed --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go @@ -0,0 +1,21 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.7 +// +build !go1.7 + +// TODO(mdempsky): Remove after #44505 is resolved + +package pkgbits + +import "runtime" + +func walkFrames(pcs []uintptr, visit frameVisitor) { + for _, pc := range pcs { + fn := runtime.FuncForPC(pc) + file, line := fn.FileLine(pc) + + visit(file, line, fn.Name(), pc-fn.Entry()) + } +} diff --git a/agent/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go b/agent/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go new file mode 100644 index 00000000000..2324ae7adfe --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go @@ -0,0 +1,28 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.7 +// +build go1.7 + +package pkgbits + +import "runtime" + +// walkFrames calls visit for each call frame represented by pcs. +// +// pcs should be a slice of PCs, as returned by runtime.Callers. +func walkFrames(pcs []uintptr, visit frameVisitor) { + if len(pcs) == 0 { + return + } + + frames := runtime.CallersFrames(pcs) + for { + frame, more := frames.Next() + visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) + if !more { + return + } + } +} diff --git a/agent/vendor/golang.org/x/tools/internal/pkgbits/reloc.go b/agent/vendor/golang.org/x/tools/internal/pkgbits/reloc.go new file mode 100644 index 00000000000..fcdfb97ca99 --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/pkgbits/reloc.go @@ -0,0 +1,42 @@ +// Copyright 2021 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// A RelocKind indicates a particular section within a unified IR export. +type RelocKind int32 + +// An Index represents a bitstream element index within a particular +// section. +type Index int32 + +// A relocEnt (relocation entry) is an entry in an element's local +// reference table. +// +// TODO(mdempsky): Rename this too. +type RelocEnt struct { + Kind RelocKind + Idx Index +} + +// Reserved indices within the meta relocation section. +const ( + PublicRootIdx Index = 0 + PrivateRootIdx Index = 1 +) + +const ( + RelocString RelocKind = iota + RelocMeta + RelocPosBase + RelocPkg + RelocName + RelocType + RelocObj + RelocObjExt + RelocObjDict + RelocBody + + numRelocs = iota +) diff --git a/agent/vendor/golang.org/x/tools/internal/pkgbits/support.go b/agent/vendor/golang.org/x/tools/internal/pkgbits/support.go new file mode 100644 index 00000000000..ad26d3b28ca --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/pkgbits/support.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import "fmt" + +func assert(b bool) { + if !b { + panic("assertion failed") + } +} + +func errorf(format string, args ...interface{}) { + panic(fmt.Errorf(format, args...)) +} diff --git a/agent/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/agent/vendor/golang.org/x/tools/internal/pkgbits/sync.go new file mode 100644 index 00000000000..5bd51ef7170 --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/pkgbits/sync.go @@ -0,0 +1,113 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "fmt" + "strings" +) + +// fmtFrames formats a backtrace for reporting reader/writer desyncs. +func fmtFrames(pcs ...uintptr) []string { + res := make([]string, 0, len(pcs)) + walkFrames(pcs, func(file string, line int, name string, offset uintptr) { + // Trim package from function name. It's just redundant noise. + name = strings.TrimPrefix(name, "cmd/compile/internal/noder.") + + res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset)) + }) + return res +} + +type frameVisitor func(file string, line int, name string, offset uintptr) + +// SyncMarker is an enum type that represents markers that may be +// written to export data to ensure the reader and writer stay +// synchronized. +type SyncMarker int + +//go:generate stringer -type=SyncMarker -trimprefix=Sync + +const ( + _ SyncMarker = iota + + // Public markers (known to go/types importers). + + // Low-level coding markers. + SyncEOF + SyncBool + SyncInt64 + SyncUint64 + SyncString + SyncValue + SyncVal + SyncRelocs + SyncReloc + SyncUseReloc + + // Higher-level object and type markers. + SyncPublic + SyncPos + SyncPosBase + SyncObject + SyncObject1 + SyncPkg + SyncPkgDef + SyncMethod + SyncType + SyncTypeIdx + SyncTypeParamNames + SyncSignature + SyncParams + SyncParam + SyncCodeObj + SyncSym + SyncLocalIdent + SyncSelector + + // Private markers (only known to cmd/compile). 
+ SyncPrivate + + SyncFuncExt + SyncVarExt + SyncTypeExt + SyncPragma + + SyncExprList + SyncExprs + SyncExpr + SyncExprType + SyncAssign + SyncOp + SyncFuncLit + SyncCompLit + + SyncDecl + SyncFuncBody + SyncOpenScope + SyncCloseScope + SyncCloseAnotherScope + SyncDeclNames + SyncDeclName + + SyncStmts + SyncBlockStmt + SyncIfStmt + SyncForStmt + SyncSwitchStmt + SyncRangeStmt + SyncCaseClause + SyncCommClause + SyncSelectStmt + SyncDecls + SyncLabeledStmt + SyncUseObjLocal + SyncAddLocal + SyncLinkname + SyncStmt1 + SyncStmtsEnd + SyncLabel + SyncOptLabel +) diff --git a/agent/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/agent/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go new file mode 100644 index 00000000000..4a5b0ca5f2f --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go @@ -0,0 +1,89 @@ +// Code generated by "stringer -type=SyncMarker -trimprefix=Sync"; DO NOT EDIT. + +package pkgbits + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[SyncEOF-1] + _ = x[SyncBool-2] + _ = x[SyncInt64-3] + _ = x[SyncUint64-4] + _ = x[SyncString-5] + _ = x[SyncValue-6] + _ = x[SyncVal-7] + _ = x[SyncRelocs-8] + _ = x[SyncReloc-9] + _ = x[SyncUseReloc-10] + _ = x[SyncPublic-11] + _ = x[SyncPos-12] + _ = x[SyncPosBase-13] + _ = x[SyncObject-14] + _ = x[SyncObject1-15] + _ = x[SyncPkg-16] + _ = x[SyncPkgDef-17] + _ = x[SyncMethod-18] + _ = x[SyncType-19] + _ = x[SyncTypeIdx-20] + _ = x[SyncTypeParamNames-21] + _ = x[SyncSignature-22] + _ = x[SyncParams-23] + _ = x[SyncParam-24] + _ = x[SyncCodeObj-25] + _ = x[SyncSym-26] + _ = x[SyncLocalIdent-27] + _ = x[SyncSelector-28] + _ = x[SyncPrivate-29] + _ = x[SyncFuncExt-30] + _ = x[SyncVarExt-31] + _ = x[SyncTypeExt-32] + _ = x[SyncPragma-33] + _ = x[SyncExprList-34] + _ = x[SyncExprs-35] + _ = x[SyncExpr-36] + _ = x[SyncExprType-37] + _ = x[SyncAssign-38] + _ = x[SyncOp-39] + _ = x[SyncFuncLit-40] + _ = x[SyncCompLit-41] + _ = x[SyncDecl-42] + _ = x[SyncFuncBody-43] + _ = x[SyncOpenScope-44] + _ = x[SyncCloseScope-45] + _ = x[SyncCloseAnotherScope-46] + _ = x[SyncDeclNames-47] + _ = x[SyncDeclName-48] + _ = x[SyncStmts-49] + _ = x[SyncBlockStmt-50] + _ = x[SyncIfStmt-51] + _ = x[SyncForStmt-52] + _ = x[SyncSwitchStmt-53] + _ = x[SyncRangeStmt-54] + _ = x[SyncCaseClause-55] + _ = x[SyncCommClause-56] + _ = x[SyncSelectStmt-57] + _ = x[SyncDecls-58] + _ = x[SyncLabeledStmt-59] + _ = x[SyncUseObjLocal-60] + _ = x[SyncAddLocal-61] + _ = x[SyncLinkname-62] + _ = x[SyncStmt1-63] + _ = x[SyncStmtsEnd-64] + _ = x[SyncLabel-65] + _ = x[SyncOptLabel-66] +} + +const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel" + +var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 
394, 405, 416, 424, 432, 437, 445, 450, 458} + +func (i SyncMarker) String() string { + i -= 1 + if i < 0 || i >= SyncMarker(len(_SyncMarker_index)-1) { + return "SyncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _SyncMarker_name[_SyncMarker_index[i]:_SyncMarker_index[i+1]] +} diff --git a/agent/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/agent/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go new file mode 100644 index 00000000000..7e638ec24fc --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go @@ -0,0 +1,151 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// package tokeninternal provides access to some internal features of the token +// package. +package tokeninternal + +import ( + "fmt" + "go/token" + "sort" + "sync" + "unsafe" +) + +// GetLines returns the table of line-start offsets from a token.File. +func GetLines(file *token.File) []int { + // token.File has a Lines method on Go 1.21 and later. + if file, ok := (interface{})(file).(interface{ Lines() []int }); ok { + return file.Lines() + } + + // This declaration must match that of token.File. + // This creates a risk of dependency skew. + // For now we check that the size of the two + // declarations is the same, on the (fragile) assumption + // that future changes would add fields. + type tokenFile119 struct { + _ string + _ int + _ int + mu sync.Mutex // we're not complete monsters + lines []int + _ []struct{} + } + type tokenFile118 struct { + _ *token.FileSet // deleted in go1.19 + tokenFile119 + } + + type uP = unsafe.Pointer + switch unsafe.Sizeof(*file) { + case unsafe.Sizeof(tokenFile118{}): + var ptr *tokenFile118 + *(*uP)(uP(&ptr)) = uP(file) + ptr.mu.Lock() + defer ptr.mu.Unlock() + return ptr.lines + + case unsafe.Sizeof(tokenFile119{}): + var ptr *tokenFile119 + *(*uP)(uP(&ptr)) = uP(file) + ptr.mu.Lock() + defer ptr.mu.Unlock() + return ptr.lines + + default: + panic("unexpected token.File size") + } +} + +// AddExistingFiles adds the specified files to the FileSet if they +// are not already present. It panics if any pair of files in the +// resulting FileSet would overlap. +func AddExistingFiles(fset *token.FileSet, files []*token.File) { + // Punch through the FileSet encapsulation. + type tokenFileSet struct { + // This type remained essentially consistent from go1.16 to go1.21. + mutex sync.RWMutex + base int + files []*token.File + _ *token.File // changed to atomic.Pointer[token.File] in go1.19 + } + + // If the size of token.FileSet changes, this will fail to compile. + const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{})) + var _ [-delta * delta]int + + type uP = unsafe.Pointer + var ptr *tokenFileSet + *(*uP)(uP(&ptr)) = uP(fset) + ptr.mutex.Lock() + defer ptr.mutex.Unlock() + + // Merge and sort. + newFiles := append(ptr.files, files...) + sort.Slice(newFiles, func(i, j int) bool { + return newFiles[i].Base() < newFiles[j].Base() + }) + + // Reject overlapping files. + // Discard adjacent identical files. 
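+ // A token.File with base b and size s covers positions [b, b+s], so the + // next file's base must be at least b+s+1; the +1 in the check below + // accounts for the end-of-file position.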
+ out := newFiles[:0] + for i, file := range newFiles { + if i > 0 { + prev := newFiles[i-1] + if file == prev { + continue + } + if prev.Base()+prev.Size()+1 > file.Base() { + panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)", + prev.Name(), prev.Base(), prev.Base()+prev.Size(), + file.Name(), file.Base(), file.Base()+file.Size())) + } + } + out = append(out, file) + } + newFiles = out + + ptr.files = newFiles + + // Advance FileSet.Base(). + if len(newFiles) > 0 { + last := newFiles[len(newFiles)-1] + newBase := last.Base() + last.Size() + 1 + if ptr.base < newBase { + ptr.base = newBase + } + } +} + +// FileSetFor returns a new FileSet containing a sequence of new Files with +// the same base, size, and line as the input files, for use in APIs that +// require a FileSet. +// +// Precondition: the input files must be non-overlapping, and sorted in order +// of their Base. +func FileSetFor(files ...*token.File) *token.FileSet { + fset := token.NewFileSet() + for _, f := range files { + f2 := fset.AddFile(f.Name(), f.Base(), f.Size()) + lines := GetLines(f) + f2.SetLines(lines) + } + return fset +} + +// CloneFileSet creates a new FileSet holding all files in fset. It does not +// create copies of the token.Files in fset: they are added to the resulting +// FileSet unmodified. +func CloneFileSet(fset *token.FileSet) *token.FileSet { + var files []*token.File + fset.Iterate(func(f *token.File) bool { + files = append(files, f) + return true + }) + newFileSet := token.NewFileSet() + AddExistingFiles(newFileSet, files) + return newFileSet +} diff --git a/agent/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/agent/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go new file mode 100644 index 00000000000..07484073a57 --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go @@ -0,0 +1,1560 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +//go:generate stringer -type=ErrorCode + +type ErrorCode int + +// This file defines the error codes that can be produced during type-checking. +// Collectively, these codes provide an identifier that may be used to +// implement special handling for certain types of errors. +// +// Error codes should be fine-grained enough that the exact nature of the error +// can be easily determined, but coarse enough that they are not an +// implementation detail of the type checking algorithm. As a rule-of-thumb, +// errors should be considered equivalent if there is a theoretical refactoring +// of the type checker in which they are emitted in exactly one place. For +// example, the type checker emits different error messages for "too many +// arguments" and "too few arguments", but one can imagine an alternative type +// checker where this check instead just emits a single "wrong number of +// arguments", so these errors should have the same code. +// +// Error code names should be as brief as possible while retaining accuracy and +// distinctiveness. In most cases names should start with an adjective +// describing the nature of the error (e.g. "invalid", "unused", "misplaced"), +// and end with a noun identifying the relevant language object. For example, +// "DuplicateDecl" or "InvalidSliceExpr". For brevity, naming follows the +// convention that "bad" implies a problem with syntax, and "invalid" implies a +// problem with types. 
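+// As a purely illustrative sketch (the helper below is hypothetical and not +// part of this file), a caller that has recovered an ErrorCode for a +// diagnostic might branch on the constants declared below: +// +//	switch code { +//	case UnusedImport, UndeclaredName: +//		// likely candidates for an automated quick fix +//	}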
+ +const ( + // InvalidSyntaxTree occurs if an invalid syntax tree is provided + // to the type checker. It should never happen. + InvalidSyntaxTree ErrorCode = -1 +) + +const ( + _ ErrorCode = iota + + // Test is reserved for errors that only apply while in self-test mode. + Test + + /* package names */ + + // BlankPkgName occurs when a package name is the blank identifier "_". + // + // Per the spec: + // "The PackageName must not be the blank identifier." + BlankPkgName + + // MismatchedPkgName occurs when a file's package name doesn't match the + // package name already established by other files. + MismatchedPkgName + + // InvalidPkgUse occurs when a package identifier is used outside of a + // selector expression. + // + // Example: + // import "fmt" + // + // var _ = fmt + InvalidPkgUse + + /* imports */ + + // BadImportPath occurs when an import path is not valid. + BadImportPath + + // BrokenImport occurs when importing a package fails. + // + // Example: + // import "amissingpackage" + BrokenImport + + // ImportCRenamed occurs when the special import "C" is renamed. "C" is a + // pseudo-package, and must not be renamed. + // + // Example: + // import _ "C" + ImportCRenamed + + // UnusedImport occurs when an import is unused. + // + // Example: + // import "fmt" + // + // func main() {} + UnusedImport + + /* initialization */ + + // InvalidInitCycle occurs when an invalid cycle is detected within the + // initialization graph. + // + // Example: + // var x int = f() + // + // func f() int { return x } + InvalidInitCycle + + /* decls */ + + // DuplicateDecl occurs when an identifier is declared multiple times. + // + // Example: + // var x = 1 + // var x = 2 + DuplicateDecl + + // InvalidDeclCycle occurs when a declaration cycle is not valid. + // + // Example: + // import "unsafe" + // + // type T struct { + // a [n]int + // } + // + // var n = unsafe.Sizeof(T{}) + InvalidDeclCycle + + // InvalidTypeCycle occurs when a cycle in type definitions results in a + // type that is not well-defined. + // + // Example: + // import "unsafe" + // + // type T [unsafe.Sizeof(T{})]int + InvalidTypeCycle + + /* decls > const */ + + // InvalidConstInit occurs when a const declaration has a non-constant + // initializer. + // + // Example: + // var x int + // const _ = x + InvalidConstInit + + // InvalidConstVal occurs when a const value cannot be converted to its + // target type. + // + // TODO(findleyr): this error code and example are not very clear. Consider + // removing it. + // + // Example: + // const _ = 1 << "hello" + InvalidConstVal + + // InvalidConstType occurs when the underlying type in a const declaration + // is not a valid constant type. + // + // Example: + // const c *int = 4 + InvalidConstType + + /* decls > var (+ other variable assignment codes) */ + + // UntypedNilUse occurs when the predeclared (untyped) value nil is used to + // initialize a variable declared without an explicit type. + // + // Example: + // var x = nil + UntypedNilUse + + // WrongAssignCount occurs when the number of values on the right-hand side + // of an assignment or or initialization expression does not match the number + // of variables on the left-hand side. + // + // Example: + // var x = 1, 2 + WrongAssignCount + + // UnassignableOperand occurs when the left-hand side of an assignment is + // not assignable. 
+ // + // Example: + // func f() { + // const c = 1 + // c = 2 + // } + UnassignableOperand + + // NoNewVar occurs when a short variable declaration (':=') does not declare + // new variables. + // + // Example: + // func f() { + // x := 1 + // x := 2 + // } + NoNewVar + + // MultiValAssignOp occurs when an assignment operation (+=, *=, etc) does + // not have single-valued left-hand or right-hand side. + // + // Per the spec: + // "In assignment operations, both the left- and right-hand expression lists + // must contain exactly one single-valued expression" + // + // Example: + // func f() int { + // x, y := 1, 2 + // x, y += 1 + // return x + y + // } + MultiValAssignOp + + // InvalidIfaceAssign occurs when a value of type T is used as an + // interface, but T does not implement a method of the expected interface. + // + // Example: + // type I interface { + // f() + // } + // + // type T int + // + // var x I = T(1) + InvalidIfaceAssign + + // InvalidChanAssign occurs when a chan assignment is invalid. + // + // Per the spec, a value x is assignable to a channel type T if: + // "x is a bidirectional channel value, T is a channel type, x's type V and + // T have identical element types, and at least one of V or T is not a + // defined type." + // + // Example: + // type T1 chan int + // type T2 chan int + // + // var x T1 + // // Invalid assignment because both types are named + // var _ T2 = x + InvalidChanAssign + + // IncompatibleAssign occurs when the type of the right-hand side expression + // in an assignment cannot be assigned to the type of the variable being + // assigned. + // + // Example: + // var x []int + // var _ int = x + IncompatibleAssign + + // UnaddressableFieldAssign occurs when trying to assign to a struct field + // in a map value. + // + // Example: + // func f() { + // m := make(map[string]struct{i int}) + // m["foo"].i = 42 + // } + UnaddressableFieldAssign + + /* decls > type (+ other type expression codes) */ + + // NotAType occurs when the identifier used as the underlying type in a type + // declaration or the right-hand side of a type alias does not denote a type. + // + // Example: + // var S = 2 + // + // type T S + NotAType + + // InvalidArrayLen occurs when an array length is not a constant value. + // + // Example: + // var n = 3 + // var _ = [n]int{} + InvalidArrayLen + + // BlankIfaceMethod occurs when a method name is '_'. + // + // Per the spec: + // "The name of each explicitly specified method must be unique and not + // blank." + // + // Example: + // type T interface { + // _(int) + // } + BlankIfaceMethod + + // IncomparableMapKey occurs when a map key type does not support the == and + // != operators. + // + // Per the spec: + // "The comparison operators == and != must be fully defined for operands of + // the key type; thus the key type must not be a function, map, or slice." + // + // Example: + // var x map[T]int + // + // type T []int + IncomparableMapKey + + // InvalidIfaceEmbed occurs when a non-interface type is embedded in an + // interface. + // + // Example: + // type T struct {} + // + // func (T) m() + // + // type I interface { + // T + // } + InvalidIfaceEmbed + + // InvalidPtrEmbed occurs when an embedded field is of the pointer form *T, + // and T itself is itself a pointer, an unsafe.Pointer, or an interface. + // + // Per the spec: + // "An embedded field must be specified as a type name T or as a pointer to + // a non-interface type name *T, and T itself may not be a pointer type." 
+ // + // Example: + // type T *int + // + // type S struct { + // *T + // } + InvalidPtrEmbed + + /* decls > func and method */ + + // BadRecv occurs when a method declaration does not have exactly one + // receiver parameter. + // + // Example: + // func () _() {} + BadRecv + + // InvalidRecv occurs when a receiver type expression is not of the form T + // or *T, or T is a pointer type. + // + // Example: + // type T struct {} + // + // func (**T) m() {} + InvalidRecv + + // DuplicateFieldAndMethod occurs when an identifier appears as both a field + // and method name. + // + // Example: + // type T struct { + // m int + // } + // + // func (T) m() {} + DuplicateFieldAndMethod + + // DuplicateMethod occurs when two methods on the same receiver type have + // the same name. + // + // Example: + // type T struct {} + // func (T) m() {} + // func (T) m(i int) int { return i } + DuplicateMethod + + /* decls > special */ + + // InvalidBlank occurs when a blank identifier is used as a value or type. + // + // Per the spec: + // "The blank identifier may appear as an operand only on the left-hand side + // of an assignment." + // + // Example: + // var x = _ + InvalidBlank + + // InvalidIota occurs when the predeclared identifier iota is used outside + // of a constant declaration. + // + // Example: + // var x = iota + InvalidIota + + // MissingInitBody occurs when an init function is missing its body. + // + // Example: + // func init() + MissingInitBody + + // InvalidInitSig occurs when an init function declares parameters or + // results. + // + // Example: + // func init() int { return 1 } + InvalidInitSig + + // InvalidInitDecl occurs when init is declared as anything other than a + // function. + // + // Example: + // var init = 1 + InvalidInitDecl + + // InvalidMainDecl occurs when main is declared as anything other than a + // function, in a main package. + InvalidMainDecl + + /* exprs */ + + // TooManyValues occurs when a function returns too many values for the + // expression context in which it is used. + // + // Example: + // func ReturnTwo() (int, int) { + // return 1, 2 + // } + // + // var x = ReturnTwo() + TooManyValues + + // NotAnExpr occurs when a type expression is used where a value expression + // is expected. + // + // Example: + // type T struct {} + // + // func f() { + // T + // } + NotAnExpr + + /* exprs > const */ + + // TruncatedFloat occurs when a float constant is truncated to an integer + // value. + // + // Example: + // var _ int = 98.6 + TruncatedFloat + + // NumericOverflow occurs when a numeric constant overflows its target type. + // + // Example: + // var x int8 = 1000 + NumericOverflow + + /* exprs > operation */ + + // UndefinedOp occurs when an operator is not defined for the type(s) used + // in an operation. + // + // Example: + // var c = "a" - "b" + UndefinedOp + + // MismatchedTypes occurs when operand types are incompatible in a binary + // operation. + // + // Example: + // var a = "hello" + // var b = 1 + // var c = a - b + MismatchedTypes + + // DivByZero occurs when a division operation is provable at compile + // time to be a division by zero. + // + // Example: + // const divisor = 0 + // var x int = 1/divisor + DivByZero + + // NonNumericIncDec occurs when an increment or decrement operator is + // applied to a non-numeric value. + // + // Example: + // func f() { + // var c = "c" + // c++ + // } + NonNumericIncDec + + /* exprs > ptr */ + + // UnaddressableOperand occurs when the & operator is applied to an + // unaddressable expression. 
+ // + // Example: + // var x = &1 + UnaddressableOperand + + // InvalidIndirection occurs when a non-pointer value is indirected via the + // '*' operator. + // + // Example: + // var x int + // var y = *x + InvalidIndirection + + /* exprs > [] */ + + // NonIndexableOperand occurs when an index operation is applied to a value + // that cannot be indexed. + // + // Example: + // var x = 1 + // var y = x[1] + NonIndexableOperand + + // InvalidIndex occurs when an index argument is not of integer type, + // negative, or out-of-bounds. + // + // Example: + // var s = [...]int{1,2,3} + // var x = s[5] + // + // Example: + // var s = []int{1,2,3} + // var _ = s[-1] + // + // Example: + // var s = []int{1,2,3} + // var i string + // var _ = s[i] + InvalidIndex + + // SwappedSliceIndices occurs when constant indices in a slice expression + // are decreasing in value. + // + // Example: + // var _ = []int{1,2,3}[2:1] + SwappedSliceIndices + + /* operators > slice */ + + // NonSliceableOperand occurs when a slice operation is applied to a value + // whose type is not sliceable, or is unaddressable. + // + // Example: + // var x = [...]int{1, 2, 3}[:1] + // + // Example: + // var x = 1 + // var y = 1[:1] + NonSliceableOperand + + // InvalidSliceExpr occurs when a three-index slice expression (a[x:y:z]) is + // applied to a string. + // + // Example: + // var s = "hello" + // var x = s[1:2:3] + InvalidSliceExpr + + /* exprs > shift */ + + // InvalidShiftCount occurs when the right-hand side of a shift operation is + // either non-integer, negative, or too large. + // + // Example: + // var ( + // x string + // y int = 1 << x + // ) + InvalidShiftCount + + // InvalidShiftOperand occurs when the shifted operand is not an integer. + // + // Example: + // var s = "hello" + // var x = s << 2 + InvalidShiftOperand + + /* exprs > chan */ + + // InvalidReceive occurs when there is a channel receive from a value that + // is either not a channel, or is a send-only channel. + // + // Example: + // func f() { + // var x = 1 + // <-x + // } + InvalidReceive + + // InvalidSend occurs when there is a channel send to a value that is not a + // channel, or is a receive-only channel. + // + // Example: + // func f() { + // var x = 1 + // x <- "hello!" + // } + InvalidSend + + /* exprs > literal */ + + // DuplicateLitKey occurs when an index is duplicated in a slice, array, or + // map literal. + // + // Example: + // var _ = []int{0:1, 0:2} + // + // Example: + // var _ = map[string]int{"a": 1, "a": 2} + DuplicateLitKey + + // MissingLitKey occurs when a map literal is missing a key expression. + // + // Example: + // var _ = map[string]int{1} + MissingLitKey + + // InvalidLitIndex occurs when the key in a key-value element of a slice or + // array literal is not an integer constant. + // + // Example: + // var i = 0 + // var x = []string{i: "world"} + InvalidLitIndex + + // OversizeArrayLit occurs when an array literal exceeds its length. + // + // Example: + // var _ = [2]int{1,2,3} + OversizeArrayLit + + // MixedStructLit occurs when a struct literal contains a mix of positional + // and named elements. + // + // Example: + // var _ = struct{i, j int}{i: 1, 2} + MixedStructLit + + // InvalidStructLit occurs when a positional struct literal has an incorrect + // number of values. + // + // Example: + // var _ = struct{i, j int}{1,2,3} + InvalidStructLit + + // MissingLitField occurs when a struct literal refers to a field that does + // not exist on the struct type. 
+ // + // Example: + // var _ = struct{i int}{j: 2} + MissingLitField + + // DuplicateLitField occurs when a struct literal contains duplicated + // fields. + // + // Example: + // var _ = struct{i int}{i: 1, i: 2} + DuplicateLitField + + // UnexportedLitField occurs when a positional struct literal implicitly + // assigns an unexported field of an imported type. + UnexportedLitField + + // InvalidLitField occurs when a field name is not a valid identifier. + // + // Example: + // var _ = struct{i int}{1: 1} + InvalidLitField + + // UntypedLit occurs when a composite literal omits a required type + // identifier. + // + // Example: + // type outer struct{ + // inner struct { i int } + // } + // + // var _ = outer{inner: {1}} + UntypedLit + + // InvalidLit occurs when a composite literal expression does not match its + // type. + // + // Example: + // type P *struct{ + // x int + // } + // var _ = P {} + InvalidLit + + /* exprs > selector */ + + // AmbiguousSelector occurs when a selector is ambiguous. + // + // Example: + // type E1 struct { i int } + // type E2 struct { i int } + // type T struct { E1; E2 } + // + // var x T + // var _ = x.i + AmbiguousSelector + + // UndeclaredImportedName occurs when a package-qualified identifier is + // undeclared by the imported package. + // + // Example: + // import "go/types" + // + // var _ = types.NotAnActualIdentifier + UndeclaredImportedName + + // UnexportedName occurs when a selector refers to an unexported identifier + // of an imported package. + // + // Example: + // import "reflect" + // + // type _ reflect.flag + UnexportedName + + // UndeclaredName occurs when an identifier is not declared in the current + // scope. + // + // Example: + // var x T + UndeclaredName + + // MissingFieldOrMethod occurs when a selector references a field or method + // that does not exist. + // + // Example: + // type T struct {} + // + // var x = T{}.f + MissingFieldOrMethod + + /* exprs > ... */ + + // BadDotDotDotSyntax occurs when a "..." occurs in a context where it is + // not valid. + // + // Example: + // var _ = map[int][...]int{0: {}} + BadDotDotDotSyntax + + // NonVariadicDotDotDot occurs when a "..." is used on the final argument to + // a non-variadic function. + // + // Example: + // func printArgs(s []string) { + // for _, a := range s { + // println(a) + // } + // } + // + // func f() { + // s := []string{"a", "b", "c"} + // printArgs(s...) + // } + NonVariadicDotDotDot + + // MisplacedDotDotDot occurs when a "..." is used somewhere other than the + // final argument to a function call. + // + // Example: + // func printArgs(args ...int) { + // for _, a := range args { + // println(a) + // } + // } + // + // func f() { + // a := []int{1,2,3} + // printArgs(0, a...) + // } + MisplacedDotDotDot + + // InvalidDotDotDotOperand occurs when a "..." operator is applied to a + // single-valued operand. + // + // Example: + // func printArgs(args ...int) { + // for _, a := range args { + // println(a) + // } + // } + // + // func f() { + // a := 1 + // printArgs(a...) + // } + // + // Example: + // func args() (int, int) { + // return 1, 2 + // } + // + // func printArgs(args ...int) { + // for _, a := range args { + // println(a) + // } + // } + // + // func g() { + // printArgs(args()...) + // } + InvalidDotDotDotOperand + + // InvalidDotDotDot occurs when a "..." is used in a non-variadic built-in + // function. + // + // Example: + // var s = []int{1, 2, 3} + // var l = len(s...) 
+ InvalidDotDotDot + + /* exprs > built-in */ + + // UncalledBuiltin occurs when a built-in function is used as a + // function-valued expression, instead of being called. + // + // Per the spec: + // "The built-in functions do not have standard Go types, so they can only + // appear in call expressions; they cannot be used as function values." + // + // Example: + // var _ = copy + UncalledBuiltin + + // InvalidAppend occurs when append is called with a first argument that is + // not a slice. + // + // Example: + // var _ = append(1, 2) + InvalidAppend + + // InvalidCap occurs when an argument to the cap built-in function is not of + // supported type. + // + // See https://golang.org/ref/spec#Length_and_capacity for information on + // which underlying types are supported as arguments to cap and len. + // + // Example: + // var s = 2 + // var x = cap(s) + InvalidCap + + // InvalidClose occurs when close(...) is called with an argument that is + // not of channel type, or that is a receive-only channel. + // + // Example: + // func f() { + // var x int + // close(x) + // } + InvalidClose + + // InvalidCopy occurs when the arguments are not of slice type or do not + // have compatible type. + // + // See https://golang.org/ref/spec#Appending_and_copying_slices for more + // information on the type requirements for the copy built-in. + // + // Example: + // func f() { + // var x []int + // y := []int64{1,2,3} + // copy(x, y) + // } + InvalidCopy + + // InvalidComplex occurs when the complex built-in function is called with + // arguments with incompatible types. + // + // Example: + // var _ = complex(float32(1), float64(2)) + InvalidComplex + + // InvalidDelete occurs when the delete built-in function is called with a + // first argument that is not a map. + // + // Example: + // func f() { + // m := "hello" + // delete(m, "e") + // } + InvalidDelete + + // InvalidImag occurs when the imag built-in function is called with an + // argument that does not have complex type. + // + // Example: + // var _ = imag(int(1)) + InvalidImag + + // InvalidLen occurs when an argument to the len built-in function is not of + // supported type. + // + // See https://golang.org/ref/spec#Length_and_capacity for information on + // which underlying types are supported as arguments to cap and len. + // + // Example: + // var s = 2 + // var x = len(s) + InvalidLen + + // SwappedMakeArgs occurs when make is called with three arguments, and its + // length argument is larger than its capacity argument. + // + // Example: + // var x = make([]int, 3, 2) + SwappedMakeArgs + + // InvalidMake occurs when make is called with an unsupported type argument. + // + // See https://golang.org/ref/spec#Making_slices_maps_and_channels for + // information on the types that may be created using make. + // + // Example: + // var x = make(int) + InvalidMake + + // InvalidReal occurs when the real built-in function is called with an + // argument that does not have complex type. + // + // Example: + // var _ = real(int(1)) + InvalidReal + + /* exprs > assertion */ + + // InvalidAssert occurs when a type assertion is applied to a + // value that is not of interface type. + // + // Example: + // var x = 1 + // var _ = x.(float64) + InvalidAssert + + // ImpossibleAssert occurs for a type assertion x.(T) when the value x of + // interface cannot have dynamic type T, due to a missing or mismatching + // method on T.
+ // + // Example: + // type T int + // + // func (t *T) m() int { return int(*t) } + // + // type I interface { m() int } + // + // var x I + // var _ = x.(T) + ImpossibleAssert + + /* exprs > conversion */ + + // InvalidConversion occurs when the argument type cannot be converted to the + // target. + // + // See https://golang.org/ref/spec#Conversions for the rules of + // convertibility. + // + // Example: + // var x float64 + // var _ = string(x) + InvalidConversion + + // InvalidUntypedConversion occurs when an there is no valid implicit + // conversion from an untyped value satisfying the type constraints of the + // context in which it is used. + // + // Example: + // var _ = 1 + "" + InvalidUntypedConversion + + /* offsetof */ + + // BadOffsetofSyntax occurs when unsafe.Offsetof is called with an argument + // that is not a selector expression. + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.Offsetof(x) + BadOffsetofSyntax + + // InvalidOffsetof occurs when unsafe.Offsetof is called with a method + // selector, rather than a field selector, or when the field is embedded via + // a pointer. + // + // Per the spec: + // + // "If f is an embedded field, it must be reachable without pointer + // indirections through fields of the struct. " + // + // Example: + // import "unsafe" + // + // type T struct { f int } + // type S struct { *T } + // var s S + // var _ = unsafe.Offsetof(s.f) + // + // Example: + // import "unsafe" + // + // type S struct{} + // + // func (S) m() {} + // + // var s S + // var _ = unsafe.Offsetof(s.m) + InvalidOffsetof + + /* control flow > scope */ + + // UnusedExpr occurs when a side-effect free expression is used as a + // statement. Such a statement has no effect. + // + // Example: + // func f(i int) { + // i*i + // } + UnusedExpr + + // UnusedVar occurs when a variable is declared but unused. + // + // Example: + // func f() { + // x := 1 + // } + UnusedVar + + // MissingReturn occurs when a function with results is missing a return + // statement. + // + // Example: + // func f() int {} + MissingReturn + + // WrongResultCount occurs when a return statement returns an incorrect + // number of values. + // + // Example: + // func ReturnOne() int { + // return 1, 2 + // } + WrongResultCount + + // OutOfScopeResult occurs when the name of a value implicitly returned by + // an empty return statement is shadowed in a nested scope. + // + // Example: + // func factor(n int) (i int) { + // for i := 2; i < n; i++ { + // if n%i == 0 { + // return + // } + // } + // return 0 + // } + OutOfScopeResult + + /* control flow > if */ + + // InvalidCond occurs when an if condition is not a boolean expression. + // + // Example: + // func checkReturn(i int) { + // if i { + // panic("non-zero return") + // } + // } + InvalidCond + + /* control flow > for */ + + // InvalidPostDecl occurs when there is a declaration in a for-loop post + // statement. + // + // Example: + // func f() { + // for i := 0; i < 10; j := 0 {} + // } + InvalidPostDecl + + // InvalidChanRange occurs when a send-only channel used in a range + // expression. + // + // Example: + // func sum(c chan<- int) { + // s := 0 + // for i := range c { + // s += i + // } + // } + InvalidChanRange + + // InvalidIterVar occurs when two iteration variables are used while ranging + // over a channel. 
+ // + // Example: + // func f(c chan int) { + // for k, v := range c { + // println(k, v) + // } + // } + InvalidIterVar + + // InvalidRangeExpr occurs when the type of a range expression is not array, + // slice, string, map, or channel. + // + // Example: + // func f(i int) { + // for j := range i { + // println(j) + // } + // } + InvalidRangeExpr + + /* control flow > switch */ + + // MisplacedBreak occurs when a break statement is not within a for, switch, + // or select statement of the innermost function definition. + // + // Example: + // func f() { + // break + // } + MisplacedBreak + + // MisplacedContinue occurs when a continue statement is not within a for + // loop of the innermost function definition. + // + // Example: + // func sumeven(n int) int { + // proceed := func() { + // continue + // } + // sum := 0 + // for i := 1; i <= n; i++ { + // if i % 2 != 0 { + // proceed() + // } + // sum += i + // } + // return sum + // } + MisplacedContinue + + // MisplacedFallthrough occurs when a fallthrough statement is not within an + // expression switch. + // + // Example: + // func typename(i interface{}) string { + // switch i.(type) { + // case int64: + // fallthrough + // case int: + // return "int" + // } + // return "unsupported" + // } + MisplacedFallthrough + + // DuplicateCase occurs when a type or expression switch has duplicate + // cases. + // + // Example: + // func printInt(i int) { + // switch i { + // case 1: + // println("one") + // case 1: + // println("One") + // } + // } + DuplicateCase + + // DuplicateDefault occurs when a type or expression switch has multiple + // default clauses. + // + // Example: + // func printInt(i int) { + // switch i { + // case 1: + // println("one") + // default: + // println("One") + // default: + // println("1") + // } + // } + DuplicateDefault + + // BadTypeKeyword occurs when a .(type) expression is used anywhere other + // than a type switch. + // + // Example: + // type I interface { + // m() + // } + // var t I + // var _ = t.(type) + BadTypeKeyword + + // InvalidTypeSwitch occurs when .(type) is used on an expression that is + // not of interface type. + // + // Example: + // func f(i int) { + // switch x := i.(type) {} + // } + InvalidTypeSwitch + + // InvalidExprSwitch occurs when a switch expression is not comparable. + // + // Example: + // func _() { + // var a struct{ _ func() } + // switch a /* ERROR cannot switch on a */ { + // } + // } + InvalidExprSwitch + + /* control flow > select */ + + // InvalidSelectCase occurs when a select case is not a channel send or + // receive. + // + // Example: + // func checkChan(c <-chan int) bool { + // select { + // case c: + // return true + // default: + // return false + // } + // } + InvalidSelectCase + + /* control flow > labels and jumps */ + + // UndeclaredLabel occurs when an undeclared label is jumped to. + // + // Example: + // func f() { + // goto L + // } + UndeclaredLabel + + // DuplicateLabel occurs when a label is declared more than once. + // + // Example: + // func f() int { + // L: + // L: + // return 1 + // } + DuplicateLabel + + // MisplacedLabel occurs when a break or continue label is not on a for, + // switch, or select statement. + // + // Example: + // func f() { + // L: + // a := []int{1,2,3} + // for _, e := range a { + // if e > 10 { + // break L + // } + // println(a) + // } + // } + MisplacedLabel + + // UnusedLabel occurs when a label is declared but not used. 
+ // + // Example: + // func f() { + // L: + // } + UnusedLabel + + // JumpOverDecl occurs when a label jumps over a variable declaration. + // + // Example: + // func f() int { + // goto L + // x := 2 + // L: + // x++ + // return x + // } + JumpOverDecl + + // JumpIntoBlock occurs when a forward jump goes to a label inside a nested + // block. + // + // Example: + // func f(x int) { + // goto L + // if x > 0 { + // L: + // print("inside block") + // } + // } + JumpIntoBlock + + /* control flow > calls */ + + // InvalidMethodExpr occurs when a pointer method is called but the argument + // is not addressable. + // + // Example: + // type T struct {} + // + // func (*T) m() int { return 1 } + // + // var _ = T.m(T{}) + InvalidMethodExpr + + // WrongArgCount occurs when too few or too many arguments are passed by a + // function call. + // + // Example: + // func f(i int) {} + // var x = f() + WrongArgCount + + // InvalidCall occurs when an expression is called that is not of function + // type. + // + // Example: + // var x = "x" + // var y = x() + InvalidCall + + /* control flow > suspended */ + + // UnusedResults occurs when a restricted expression-only built-in function + // is suspended via go or defer. Such a suspension discards the results of + // these side-effect free built-in functions, and therefore is ineffectual. + // + // Example: + // func f(a []int) int { + // defer len(a) + // return i + // } + UnusedResults + + // InvalidDefer occurs when a deferred expression is not a function call, + // for example if the expression is a type conversion. + // + // Example: + // func f(i int) int { + // defer int32(i) + // return i + // } + InvalidDefer + + // InvalidGo occurs when a go expression is not a function call, for example + // if the expression is a type conversion. + // + // Example: + // func f(i int) int { + // go int32(i) + // return i + // } + InvalidGo + + // All codes below were added in Go 1.17. + + /* decl */ + + // BadDecl occurs when a declaration has invalid syntax. + BadDecl + + // RepeatedDecl occurs when an identifier occurs more than once on the left + // hand side of a short variable declaration. + // + // Example: + // func _() { + // x, y, y := 1, 2, 3 + // } + RepeatedDecl + + /* unsafe */ + + // InvalidUnsafeAdd occurs when unsafe.Add is called with a + // length argument that is not of integer type. + // + // Example: + // import "unsafe" + // + // var p unsafe.Pointer + // var _ = unsafe.Add(p, float64(1)) + InvalidUnsafeAdd + + // InvalidUnsafeSlice occurs when unsafe.Slice is called with a + // pointer argument that is not of pointer type or a length argument + // that is not of integer type, negative, or out of bounds. + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.Slice(x, 1) + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.Slice(&x, float64(1)) + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.Slice(&x, -1) + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.Slice(&x, uint64(1) << 63) + InvalidUnsafeSlice + + // All codes below were added in Go 1.18. + + /* features */ + + // UnsupportedFeature occurs when a language feature is used that is not + // supported at this Go version. + UnsupportedFeature + + /* type params */ + + // NotAGenericType occurs when a non-generic type is used where a generic + // type is expected: in type or function instantiation. 
+ // + // Example: + // type T int + // + // var _ T[int] + NotAGenericType + + // WrongTypeArgCount occurs when a type or function is instantiated with an + // incorrect number of type arguments, including when a generic type or + // function is used without instantiation. + // + // Errors involving failed type inference are assigned other error codes. + // + // Example: + // type T[p any] int + // + // var _ T[int, string] + // + // Example: + // func f[T any]() {} + // + // var x = f + WrongTypeArgCount + + // CannotInferTypeArgs occurs when type or function type argument inference + // fails to infer all type arguments. + // + // Example: + // func f[T any]() {} + // + // func _() { + // f() + // } + // + // Example: + // type N[P, Q any] struct{} + // + // var _ N[int] + CannotInferTypeArgs + + // InvalidTypeArg occurs when a type argument does not satisfy its + // corresponding type parameter constraints. + // + // Example: + // type T[P ~int] struct{} + // + // var _ T[string] + InvalidTypeArg // arguments? InferenceFailed + + // InvalidInstanceCycle occurs when an invalid cycle is detected + // within the instantiation graph. + // + // Example: + // func f[T any]() { f[*T]() } + InvalidInstanceCycle + + // InvalidUnion occurs when an embedded union or approximation element is + // not valid. + // + // Example: + // type _ interface { + // ~int | interface{ m() } + // } + InvalidUnion + + // MisplacedConstraintIface occurs when a constraint-type interface is used + // outside of constraint position. + // + // Example: + // type I interface { ~int } + // + // var _ I + MisplacedConstraintIface + + // InvalidMethodTypeParams occurs when methods have type parameters. + // + // It cannot be encountered with an AST parsed using go/parser. + InvalidMethodTypeParams + + // MisplacedTypeParam occurs when a type parameter is used in a place where + // it is not permitted. + // + // Example: + // type T[P any] P + // + // Example: + // type T[P any] struct{ *P } + MisplacedTypeParam + + // InvalidUnsafeSliceData occurs when unsafe.SliceData is called with + // an argument that is not of slice type. It also occurs if it is used + // in a package compiled for a language version before go1.20. + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.SliceData(x) + InvalidUnsafeSliceData + + // InvalidUnsafeString occurs when unsafe.String is called with + // a length argument that is not of integer type, negative, or + // out of bounds. It also occurs if it is used in a package + // compiled for a language version before go1.20. + // + // Example: + // import "unsafe" + // + // var b [10]byte + // var _ = unsafe.String(&b[0], -1) + InvalidUnsafeString + + // InvalidUnsafeStringData occurs if it is used in a package + // compiled for a language version before go1.20. + _ // not used anymore + +) diff --git a/agent/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go b/agent/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go new file mode 100644 index 00000000000..15ecf7c5ded --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go @@ -0,0 +1,179 @@ +// Code generated by "stringer -type=ErrorCode"; DO NOT EDIT. + +package typesinternal + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again.
+ var x [1]struct{} + _ = x[InvalidSyntaxTree - -1] + _ = x[Test-1] + _ = x[BlankPkgName-2] + _ = x[MismatchedPkgName-3] + _ = x[InvalidPkgUse-4] + _ = x[BadImportPath-5] + _ = x[BrokenImport-6] + _ = x[ImportCRenamed-7] + _ = x[UnusedImport-8] + _ = x[InvalidInitCycle-9] + _ = x[DuplicateDecl-10] + _ = x[InvalidDeclCycle-11] + _ = x[InvalidTypeCycle-12] + _ = x[InvalidConstInit-13] + _ = x[InvalidConstVal-14] + _ = x[InvalidConstType-15] + _ = x[UntypedNilUse-16] + _ = x[WrongAssignCount-17] + _ = x[UnassignableOperand-18] + _ = x[NoNewVar-19] + _ = x[MultiValAssignOp-20] + _ = x[InvalidIfaceAssign-21] + _ = x[InvalidChanAssign-22] + _ = x[IncompatibleAssign-23] + _ = x[UnaddressableFieldAssign-24] + _ = x[NotAType-25] + _ = x[InvalidArrayLen-26] + _ = x[BlankIfaceMethod-27] + _ = x[IncomparableMapKey-28] + _ = x[InvalidIfaceEmbed-29] + _ = x[InvalidPtrEmbed-30] + _ = x[BadRecv-31] + _ = x[InvalidRecv-32] + _ = x[DuplicateFieldAndMethod-33] + _ = x[DuplicateMethod-34] + _ = x[InvalidBlank-35] + _ = x[InvalidIota-36] + _ = x[MissingInitBody-37] + _ = x[InvalidInitSig-38] + _ = x[InvalidInitDecl-39] + _ = x[InvalidMainDecl-40] + _ = x[TooManyValues-41] + _ = x[NotAnExpr-42] + _ = x[TruncatedFloat-43] + _ = x[NumericOverflow-44] + _ = x[UndefinedOp-45] + _ = x[MismatchedTypes-46] + _ = x[DivByZero-47] + _ = x[NonNumericIncDec-48] + _ = x[UnaddressableOperand-49] + _ = x[InvalidIndirection-50] + _ = x[NonIndexableOperand-51] + _ = x[InvalidIndex-52] + _ = x[SwappedSliceIndices-53] + _ = x[NonSliceableOperand-54] + _ = x[InvalidSliceExpr-55] + _ = x[InvalidShiftCount-56] + _ = x[InvalidShiftOperand-57] + _ = x[InvalidReceive-58] + _ = x[InvalidSend-59] + _ = x[DuplicateLitKey-60] + _ = x[MissingLitKey-61] + _ = x[InvalidLitIndex-62] + _ = x[OversizeArrayLit-63] + _ = x[MixedStructLit-64] + _ = x[InvalidStructLit-65] + _ = x[MissingLitField-66] + _ = x[DuplicateLitField-67] + _ = x[UnexportedLitField-68] + _ = x[InvalidLitField-69] + _ = x[UntypedLit-70] + _ = x[InvalidLit-71] + _ = x[AmbiguousSelector-72] + _ = x[UndeclaredImportedName-73] + _ = x[UnexportedName-74] + _ = x[UndeclaredName-75] + _ = x[MissingFieldOrMethod-76] + _ = x[BadDotDotDotSyntax-77] + _ = x[NonVariadicDotDotDot-78] + _ = x[MisplacedDotDotDot-79] + _ = x[InvalidDotDotDotOperand-80] + _ = x[InvalidDotDotDot-81] + _ = x[UncalledBuiltin-82] + _ = x[InvalidAppend-83] + _ = x[InvalidCap-84] + _ = x[InvalidClose-85] + _ = x[InvalidCopy-86] + _ = x[InvalidComplex-87] + _ = x[InvalidDelete-88] + _ = x[InvalidImag-89] + _ = x[InvalidLen-90] + _ = x[SwappedMakeArgs-91] + _ = x[InvalidMake-92] + _ = x[InvalidReal-93] + _ = x[InvalidAssert-94] + _ = x[ImpossibleAssert-95] + _ = x[InvalidConversion-96] + _ = x[InvalidUntypedConversion-97] + _ = x[BadOffsetofSyntax-98] + _ = x[InvalidOffsetof-99] + _ = x[UnusedExpr-100] + _ = x[UnusedVar-101] + _ = x[MissingReturn-102] + _ = x[WrongResultCount-103] + _ = x[OutOfScopeResult-104] + _ = x[InvalidCond-105] + _ = x[InvalidPostDecl-106] + _ = x[InvalidChanRange-107] + _ = x[InvalidIterVar-108] + _ = x[InvalidRangeExpr-109] + _ = x[MisplacedBreak-110] + _ = x[MisplacedContinue-111] + _ = x[MisplacedFallthrough-112] + _ = x[DuplicateCase-113] + _ = x[DuplicateDefault-114] + _ = x[BadTypeKeyword-115] + _ = x[InvalidTypeSwitch-116] + _ = x[InvalidExprSwitch-117] + _ = x[InvalidSelectCase-118] + _ = x[UndeclaredLabel-119] + _ = x[DuplicateLabel-120] + _ = x[MisplacedLabel-121] + _ = x[UnusedLabel-122] + _ = x[JumpOverDecl-123] + _ = x[JumpIntoBlock-124] + _ = x[InvalidMethodExpr-125] + _ = 
x[WrongArgCount-126] + _ = x[InvalidCall-127] + _ = x[UnusedResults-128] + _ = x[InvalidDefer-129] + _ = x[InvalidGo-130] + _ = x[BadDecl-131] + _ = x[RepeatedDecl-132] + _ = x[InvalidUnsafeAdd-133] + _ = x[InvalidUnsafeSlice-134] + _ = x[UnsupportedFeature-135] + _ = x[NotAGenericType-136] + _ = x[WrongTypeArgCount-137] + _ = x[CannotInferTypeArgs-138] + _ = x[InvalidTypeArg-139] + _ = x[InvalidInstanceCycle-140] + _ = x[InvalidUnion-141] + _ = x[MisplacedConstraintIface-142] + _ = x[InvalidMethodTypeParams-143] + _ = x[MisplacedTypeParam-144] + _ = x[InvalidUnsafeSliceData-145] + _ = x[InvalidUnsafeString-146] +} + +const ( + _ErrorCode_name_0 = "InvalidSyntaxTree" + _ErrorCode_name_1 = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilUseWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeatureNotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParamInvalidUnsafeSliceDataInvalidUnsafeString" +) + +var ( + _ErrorCode_index_1 = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 218, 234, 253, 261, 277, 295, 312, 330, 354, 362, 377, 393, 411, 428, 443, 450, 461, 484, 499, 511, 522, 537, 551, 566, 581, 594, 603, 617, 632, 643, 658, 667, 683, 703, 721, 740, 752, 771, 790, 806, 823, 842, 856, 867, 882, 895, 910, 926, 940, 956, 971, 988, 1006, 1021, 1031, 1041, 1058, 1080, 1094, 1108, 1128, 1146, 1166, 1184, 1207, 1223, 1238, 1251, 1261, 1273, 1284, 1298, 1311, 1322, 1332, 1347, 1358, 1369, 1382, 1398, 1415, 1439, 1456, 1471, 1481, 1490, 1503, 1519, 1535, 1546, 1561, 1577, 1591, 1607, 1621, 1638, 1658, 1671, 1687, 1701, 1718, 
1735, 1752, 1767, 1781, 1795, 1806, 1818, 1831, 1848, 1861, 1872, 1885, 1897, 1906, 1913, 1925, 1941, 1959, 1977, 1992, 2009, 2028, 2042, 2062, 2074, 2098, 2121, 2139, 2161, 2180} +) + +func (i ErrorCode) String() string { + switch { + case i == -1: + return _ErrorCode_name_0 + case 1 <= i && i <= 146: + i -= 1 + return _ErrorCode_name_1[_ErrorCode_index_1[i]:_ErrorCode_index_1[i+1]] + default: + return "ErrorCode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/agent/vendor/golang.org/x/tools/internal/typesinternal/types.go b/agent/vendor/golang.org/x/tools/internal/typesinternal/types.go new file mode 100644 index 00000000000..66e8b099bd6 --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -0,0 +1,68 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typesinternal provides access to internal go/types APIs that are not +// yet exported. +package typesinternal + +import ( + "go/token" + "go/types" + "reflect" + "unsafe" + + "golang.org/x/tools/go/types/objectpath" +) + +func SetUsesCgo(conf *types.Config) bool { + v := reflect.ValueOf(conf).Elem() + + f := v.FieldByName("go115UsesCgo") + if !f.IsValid() { + f = v.FieldByName("UsesCgo") + if !f.IsValid() { + return false + } + } + + addr := unsafe.Pointer(f.UnsafeAddr()) + *(*bool)(addr) = true + + return true +} + +// ReadGo116ErrorData extracts additional information from types.Error values +// generated by Go version 1.16 and later: the error code, start position, and +// end position. If all positions are valid, start <= err.Pos <= end. +// +// If the data could not be read, the final result parameter will be false. +func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) { + var data [3]int + // By coincidence all of these fields are ints, which simplifies things. + v := reflect.ValueOf(err) + for i, name := range []string{"go116code", "go116start", "go116end"} { + f := v.FieldByName(name) + if !f.IsValid() { + return 0, 0, 0, false + } + data[i] = int(f.Int()) + } + return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true +} + +var SetGoVersion = func(conf *types.Config, version string) bool { return false } + +// SkipEncoderMethodSorting marks the encoder as not requiring sorted methods, +// as an optimization for gopls (which guarantees the order of parsed source files). +// +// TODO(golang/go#61443): eliminate this parameter one way or the other. +// +//go:linkname SkipEncoderMethodSorting golang.org/x/tools/go/types/objectpath.skipMethodSorting +func SkipEncoderMethodSorting(enc *objectpath.Encoder) + +// ObjectpathObject is like objectpath.Object, but allows suppressing method +// sorting (which is not necessary for gopls). +// +//go:linkname ObjectpathObject golang.org/x/tools/go/types/objectpath.object +func ObjectpathObject(pkg *types.Package, p objectpath.Path, skipMethodSorting bool) (types.Object, error) diff --git a/agent/vendor/golang.org/x/tools/internal/typesinternal/types_118.go b/agent/vendor/golang.org/x/tools/internal/typesinternal/types_118.go new file mode 100644 index 00000000000..a42b072a67d --- /dev/null +++ b/agent/vendor/golang.org/x/tools/internal/typesinternal/types_118.go @@ -0,0 +1,19 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.18 +// +build go1.18 + +package typesinternal + +import ( + "go/types" +) + +func init() { + SetGoVersion = func(conf *types.Config, version string) bool { + conf.GoVersion = version + return true + } +} diff --git a/agent/vendor/modules.txt b/agent/vendor/modules.txt index 6e3342013d8..38b86259049 100644 --- a/agent/vendor/modules.txt +++ b/agent/vendor/modules.txt @@ -1,8 +1,9 @@ -# github.com/Microsoft/go-winio v0.5.2 -## explicit; go 1.13 +# github.com/Microsoft/go-winio v0.6.0 +## explicit; go 1.17 github.com/Microsoft/go-winio +github.com/Microsoft/go-winio/internal/socket github.com/Microsoft/go-winio/pkg/guid -# github.com/Microsoft/hcsshim v0.8.25 +# github.com/Microsoft/hcsshim v0.9.9 ## explicit; go 1.13 github.com/Microsoft/hcsshim/osversion # github.com/aws/amazon-ecs-agent/ecs-agent v0.0.0 => ../ecs-agent @@ -135,12 +136,13 @@ github.com/cihub/seelog/archive github.com/cihub/seelog/archive/gzip github.com/cihub/seelog/archive/tar github.com/cihub/seelog/archive/zip -# github.com/cilium/ebpf v0.6.2 -## explicit; go 1.15 +# github.com/cilium/ebpf v0.9.1 +## explicit; go 1.17 github.com/cilium/ebpf github.com/cilium/ebpf/asm +github.com/cilium/ebpf/btf github.com/cilium/ebpf/internal -github.com/cilium/ebpf/internal/btf +github.com/cilium/ebpf/internal/sys github.com/cilium/ebpf/internal/unix github.com/cilium/ebpf/link # github.com/containerd/cgroups v1.0.4 @@ -166,7 +168,7 @@ github.com/containernetworking/cni/pkg/version # github.com/containernetworking/plugins v0.9.1 ## explicit; go 1.14 github.com/containernetworking/plugins/pkg/ns -# github.com/coreos/go-systemd/v22 v22.3.2 +# github.com/coreos/go-systemd/v22 v22.5.0 ## explicit; go 1.12 github.com/coreos/go-systemd/v22/dbus # github.com/davecgh/go-spew v1.1.1 @@ -221,7 +223,7 @@ github.com/docker/go-units # github.com/fsnotify/fsnotify v1.6.0 ## explicit; go 1.16 github.com/fsnotify/fsnotify -# github.com/godbus/dbus/v5 v5.0.6 +# github.com/godbus/dbus/v5 v5.1.0 ## explicit; go 1.12 github.com/godbus/dbus/v5 # github.com/gogo/protobuf v1.3.2 @@ -229,7 +231,7 @@ github.com/godbus/dbus/v5 github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/proto github.com/gogo/protobuf/protoc-gen-gogo/descriptor -# github.com/golang/mock v1.4.1 +# github.com/golang/mock v1.6.0 ## explicit; go 1.11 github.com/golang/mock/gomock github.com/golang/mock/mockgen/model @@ -267,14 +269,6 @@ github.com/moby/sys/mount github.com/moby/sys/mountinfo # github.com/moby/term v0.0.0-20221205130635-1aeaba878587 ## explicit; go 1.18 -# github.com/morikuni/aec v1.0.0 -## explicit -# github.com/onsi/ginkgo v1.16.5 -## explicit; go 1.16 -# github.com/onsi/ginkgo/v2 v2.7.0 -## explicit; go 1.18 -# github.com/onsi/gomega v1.25.0 -## explicit; go 1.18 # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest @@ -315,22 +309,22 @@ github.com/prometheus/common/model github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/sirupsen/logrus v1.8.1 +# github.com/sirupsen/logrus v1.9.0 ## explicit; go 1.13 github.com/sirupsen/logrus -# github.com/stretchr/testify v1.7.0 +# github.com/stretchr/testify v1.8.1 ## explicit; go 1.13 github.com/stretchr/testify/assert github.com/stretchr/testify/require github.com/stretchr/testify/suite -# github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 +# github.com/vishvananda/netlink v1.2.1-beta.2 ## explicit; go 1.12 github.com/vishvananda/netlink github.com/vishvananda/netlink/nl 
-# github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae -## explicit; go 1.12 +# github.com/vishvananda/netns v0.0.4 +## explicit; go 1.17 github.com/vishvananda/netns -# go.etcd.io/bbolt v1.3.5 +# go.etcd.io/bbolt v1.3.6 ## explicit; go 1.12 go.etcd.io/bbolt # golang.org/x/mod v0.12.0 @@ -370,7 +364,12 @@ golang.org/x/text/unicode/norm golang.org/x/time/rate # golang.org/x/tools v0.12.0 ## explicit; go 1.18 +golang.org/x/tools/cmd/stringer golang.org/x/tools/go/ast/astutil +golang.org/x/tools/go/gcexportdata +golang.org/x/tools/go/internal/packagesdriver +golang.org/x/tools/go/packages +golang.org/x/tools/go/types/objectpath golang.org/x/tools/imports golang.org/x/tools/internal/event golang.org/x/tools/internal/event/core @@ -378,10 +377,15 @@ golang.org/x/tools/internal/event/keys golang.org/x/tools/internal/event/label golang.org/x/tools/internal/event/tag golang.org/x/tools/internal/fastwalk +golang.org/x/tools/internal/gcimporter golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/gopathwalk golang.org/x/tools/internal/imports +golang.org/x/tools/internal/packagesinternal +golang.org/x/tools/internal/pkgbits +golang.org/x/tools/internal/tokeninternal golang.org/x/tools/internal/typeparams +golang.org/x/tools/internal/typesinternal # google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/status diff --git a/ecs-agent/go.mod b/ecs-agent/go.mod index 22fc35d4e61..511e1b5c7ac 100644 --- a/ecs-agent/go.mod +++ b/ecs-agent/go.mod @@ -3,22 +3,26 @@ module github.com/aws/amazon-ecs-agent/ecs-agent go 1.19 require ( + github.com/Microsoft/hcsshim v0.9.9 github.com/aws/aws-sdk-go v1.36.0 github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 github.com/container-storage-interface/spec v1.8.0 + github.com/containernetworking/cni v1.1.2 + github.com/containernetworking/plugins v0.9.1 github.com/didip/tollbooth v4.0.2+incompatible github.com/docker/docker v20.10.24+incompatible - github.com/golang/mock v1.4.1 + github.com/golang/mock v1.6.0 + github.com/google/uuid v1.3.0 github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.5.0 github.com/pborman/uuid v1.2.1 github.com/pkg/errors v0.9.1 github.com/prometheus/client_model v0.2.0 github.com/prometheus/common v0.26.0 - github.com/stretchr/testify v1.7.0 + github.com/stretchr/testify v1.8.1 golang.org/x/net v0.8.0 - golang.org/x/sys v0.6.0 - golang.org/x/tools v0.6.0 + golang.org/x/sys v0.7.0 + golang.org/x/tools v0.7.0 google.golang.org/grpc v1.53.0 k8s.io/apimachinery v0.26.0-alpha.0 k8s.io/klog/v2 v2.100.1 @@ -27,14 +31,16 @@ require ( ) require ( + github.com/Microsoft/go-winio v0.5.2 // indirect + github.com/containerd/cgroups v1.0.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.4.0 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/uuid v1.3.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/moby/sys/mountinfo v0.6.2 // indirect @@ -42,6 +48,8 @@ require ( github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect github.com/patrickmn/go-cache v2.1.0+incompatible // indirect 
github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/sirupsen/logrus v1.9.0 // indirect + go.opencensus.io v0.24.0 // indirect golang.org/x/mod v0.10.0 // indirect golang.org/x/text v0.9.0 // indirect golang.org/x/time v0.3.0 // indirect diff --git a/ecs-agent/go.sum b/ecs-agent/go.sum index 91ebac1f11f..97579327106 100644 --- a/ecs-agent/go.sum +++ b/ecs-agent/go.sum @@ -1,204 +1,1026 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks 
v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.9.9 h1:FYrTiCNOc8ZddNBVkJBxWZYm22rgxHFmxMoGK66sDF0= +github.com/Microsoft/hcsshim v0.9.9/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod 
h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.36.0 h1:CscTrS+szX5iu34zk2bZrChnGO/GMtUYgMK1Xzs2hYo= github.com/aws/aws-sdk-go v1.36.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= 
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/container-storage-interface/spec v1.8.0 h1:D0vhF3PLIZwlwZEf2eNbpujGCNwspwTYf2idJRJx4xI= github.com/container-storage-interface/spec v1.8.0/go.mod h1:ROLik+GhPslwwWRNFF1KasPzroNARibH2rfz1rkg4H0= +github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= +github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= +github.com/containerd/cgroups v1.0.4/go.mod 
h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= +github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= +github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= +github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= +github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/fifo v1.0.0/go.mod 
h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= +github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.1/go.mod 
h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ= +github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= +github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= +github.com/containernetworking/plugins v0.9.1 h1:FD1tADPls2EEi3flPc2OegIY1M9pUa9r2Quag7HMLV8= +github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= +github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= +github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= +github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= +github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= +github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= +github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
+github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/didip/tollbooth v4.0.2+incompatible h1:fVSa33JzSz0hoh2NxpwZtksAzAgd7zjmGO20HCZtF4M= github.com/didip/tollbooth v4.0.2+incompatible/go.mod h1:A9b0665CE6l1KmzpDws2++elm/CsuWBMa5Jv4WY0PEY= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.24+incompatible h1:Ugvxm7a8+Gz6vqQYQQ2W7GYq5EUPaAiuPgIfVyI3dYE= github.com/docker/docker v20.10.24+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane 
v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.19.2/go.mod 
h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/mock v1.4.1 h1:ocYkMQY5RrXTYgXl7ICpV0IXwlEQGwKIsery4gyXa1U= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod 
h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod 
h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= 
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= 
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= 
+github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo 
v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.2/go.mod 
h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod 
h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.7.0 
h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= 
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto 
v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -206,36 +1028,106 @@ google.golang.org/protobuf 
v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= k8s.io/apimachinery v0.26.0-alpha.0 h1:cnXW2EigxCOrD+s52R9r5AZOcu1Nbv508gOCgSQkbo4= k8s.io/apimachinery v0.26.0-alpha.0/go.mod h1:qMx9eAk0sZQGsXGu86fab8tZdffHbwUfsvzqKn4mfB0= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod 
h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/mount-utils v0.26.0-alpha.0 h1:OOqJgwTHRsCoeWHYa7VWMIQrQWJuDeBGK9an8CfBanA= k8s.io/mount-utils v0.26.0-alpha.0/go.mod h1:WTYq8Ev/JrnkqK2h1jFUnC8qWGuqzMb9XDC+Lu3WNU0= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20230505201702-9f6742963106 h1:EObNQ3TW2D+WptiYXlApGNLVy0zm/JIBVY9i+M4wpAU= k8s.io/utils v0.0.0-20230505201702-9f6742963106/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/ecs-agent/netlib/model/ecscni/appmesh_config.go b/ecs-agent/netlib/model/ecscni/appmesh_config.go new file mode 100644 index 00000000000..568bcfdee79 --- /dev/null +++ b/ecs-agent/netlib/model/ecscni/appmesh_config.go @@ -0,0 +1,66 @@ +package ecscni + +import ( + "fmt" + + "github.com/aws/amazon-ecs-agent/ecs-agent/netlib/model/appmesh" +) + +// AppMeshConfig contains the information needed to incoke the appmesh CNI plugin. 
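+// The configuration is marshalled to JSON by BuildNetworkConfig and handed to the plugin as
+// its network configuration, so the json tags below mirror the keys the plugin is expected
+// to read. An illustrative (not authoritative) fragment of that JSON, with made-up values:
+//
+//	{"type": "aws-appmesh", "ignoredUID": "1337", "proxyIngressPort": "15000", "appPorts": ["8080"]}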
+type AppMeshConfig struct {
+	CNIConfig
+	// IgnoredUID specifies that egress traffic from the processes owned by the UID will be ignored
+	IgnoredUID string `json:"ignoredUID,omitempty"`
+	// IgnoredGID specifies that egress traffic from the processes owned by the GID will be ignored
+	IgnoredGID string `json:"ignoredGID,omitempty"`
+	// ProxyIngressPort is the ingress port number that the proxy is listening on
+	ProxyIngressPort string `json:"proxyIngressPort"`
+	// ProxyEgressPort is the egress port number that the proxy is listening on
+	ProxyEgressPort string `json:"proxyEgressPort"`
+	// AppPorts specifies the port numbers that the application is listening on
+	AppPorts []string `json:"appPorts"`
+	// EgressIgnoredPorts is the list of ports for which egress traffic will be ignored
+	EgressIgnoredPorts []string `json:"egressIgnoredPorts,omitempty"`
+	// EgressIgnoredIPs is the list of IPs for which egress traffic will be ignored
+	EgressIgnoredIPs []string `json:"egressIgnoredIPs,omitempty"`
+}
+
+func NewAppMeshConfig(cniConfig CNIConfig, cfg *appmesh.AppMesh) *AppMeshConfig {
+	return &AppMeshConfig{
+		CNIConfig:          cniConfig,
+		IgnoredUID:         cfg.IgnoredUID,
+		IgnoredGID:         cfg.IgnoredGID,
+		ProxyIngressPort:   cfg.ProxyIngressPort,
+		ProxyEgressPort:    cfg.ProxyEgressPort,
+		AppPorts:           cfg.AppPorts,
+		EgressIgnoredPorts: cfg.EgressIgnoredPorts,
+		EgressIgnoredIPs:   cfg.EgressIgnoredIPs,
+	}
+}
+
+func (amc *AppMeshConfig) String() string {
+	return fmt.Sprintf("%s, ignored uid: %s, ignored gid: %s, ingress port: %s, "+
+		"egress port: %s, app ports: %v, ignored egress ips: %v, ignored egress ports: %v",
+		amc.CNIConfig.String(), amc.IgnoredUID, amc.IgnoredGID,
+		amc.ProxyIngressPort, amc.ProxyEgressPort, amc.AppPorts,
+		amc.EgressIgnoredIPs, amc.EgressIgnoredPorts)
+}
+
+func (amc *AppMeshConfig) InterfaceName() string {
+	// Not required for the app mesh plugin as no particular interface is set up by it;
+	// the plugin only sets up iptables filters. However, CNI requires us to set it, so
+	// "eth0" is returned just to satisfy that constraint.
+ return "eth0" +} + +func (amc *AppMeshConfig) NSPath() string { + return amc.NetNSPath +} + +func (amc *AppMeshConfig) PluginName() string { + return amc.CNIPluginName +} + +func (amc *AppMeshConfig) CNIVersion() string { + return amc.CNISpecVersion +} diff --git a/ecs-agent/netlib/model/ecscni/bridge_config.go b/ecs-agent/netlib/model/ecscni/bridge_config.go new file mode 100644 index 00000000000..cafafa669e1 --- /dev/null +++ b/ecs-agent/netlib/model/ecscni/bridge_config.go @@ -0,0 +1,43 @@ +package ecscni + +import ( + "fmt" +) + +// BridgeConfig defines the configuration for bridge plugin +type BridgeConfig struct { + CNIConfig + // Name is the name of bridge + Name string `json:"bridge"` + // IPAM is the configuration to acquire ip/route from ipam plugin + IPAM IPAMConfig `json:"ipam,omitempty"` + // DeviceName is the name of the veth inside the namespace + // this was used as a parameter of the libcni, thus don't need to be marshalled + // in the plugin configuration + DeviceName string `json:"-"` +} + +func (bc *BridgeConfig) String() string { + return fmt.Sprintf("%s, name: %s, ipam: %s", bc.CNIConfig.String(), bc.Name, bc.IPAM.String()) +} + +// InterfaceName returns the veth pair name will be used inside the namespace +func (bc *BridgeConfig) InterfaceName() string { + if bc.DeviceName == "" { + return defaultInterfaceName + } + + return bc.DeviceName +} + +func (bc *BridgeConfig) NSPath() string { + return bc.NetNSPath +} + +func (bc *BridgeConfig) CNIVersion() string { + return bc.CNISpecVersion +} + +func (bc *BridgeConfig) PluginName() string { + return bc.CNIPluginName +} diff --git a/ecs-agent/netlib/model/ecscni/client.go b/ecs-agent/netlib/model/ecscni/client.go new file mode 100644 index 00000000000..2de28de2037 --- /dev/null +++ b/ecs-agent/netlib/model/ecscni/client.go @@ -0,0 +1,86 @@ +package ecscni + +import ( + "context" + "encoding/json" + "os" + "os/exec" + "strings" + + "github.com/containernetworking/cni/libcni" + "github.com/containernetworking/cni/pkg/invoke" + "github.com/containernetworking/cni/pkg/types" + "github.com/pkg/errors" +) + +const ( + versionCommand = "--version" +) + +// CNI defines the plugin invocation interface +type CNI interface { + // Add calls the plugin add command with given configuration + Add(context.Context, PluginConfig) (types.Result, error) + // Del calls the plugin del command with given configuration + Del(context.Context, PluginConfig) error + // Version calls the version command of plugin + Version(string) (string, error) +} + +// CNIClient is the client to invoke the plugin +type cniClient struct { + pluginPath []string + cni libcni.CNI +} + +// NewCNIClient creates a new CNIClient +func NewCNIClient(paths []string) CNI { + return &cniClient{ + pluginPath: paths, + cni: libcni.NewCNIConfig(paths, nil), + } +} + +// Add invokes the plugin with add command +func (c *cniClient) Add(ctx context.Context, config PluginConfig) (types.Result, error) { + rt := BuildRuntimeConfig(config) + net, err := BuildNetworkConfig(config) + if err != nil { + return nil, err + } + + return c.cni.AddNetwork(ctx, net, rt) +} + +// Del invokes the vpc-branch-eni plugin with del command +func (c *cniClient) Del(ctx context.Context, config PluginConfig) error { + rt := BuildRuntimeConfig(config) + net, err := BuildNetworkConfig(config) + if err != nil { + return err + } + + return c.cni.DelNetwork(ctx, net, rt) +} + +func (c *cniClient) Version(plugin string) (string, error) { + pathsFromEnv := strings.Split(os.Getenv("PATH"), ":") + file, err := 
invoke.FindInPath(plugin, append(c.pluginPath, pathsFromEnv...)) + if err != nil { + return "", errors.Wrapf(err, "unable to find plugin: %s", plugin) + } + + cmd := exec.Command(file, versionCommand) + versionInfo, err := cmd.Output() + if err != nil { + return "", errors.Wrapf(err, "unable to get version info for plugin: %s", plugin) + } + + version := &CNIPluginVersion{} + err = json.Unmarshal(versionInfo, version) + if err != nil { + return "", errors.Wrapf(err, "unable to unmarshal version info for plugin: %s", plugin) + } + + return version.String(), nil +} diff --git a/ecs-agent/netlib/model/ecscni/eni_config.go b/ecs-agent/netlib/model/ecscni/eni_config.go new file mode 100644 index 00000000000..e85c72da16d --- /dev/null +++ b/ecs-agent/netlib/model/ecscni/eni_config.go @@ -0,0 +1,76 @@ +package ecscni + +import ( + "fmt" + + "github.com/aws/amazon-ecs-agent/ecs-agent/netlib/model/networkinterface" +) + +// ENIConfig contains all the information needed to invoke the eni plugin +type ENIConfig struct { + CNIConfig + // ENIID is the id of ec2 eni + ENIID string `json:"eni"` + // MacAddress is the mac address of eni + MACAddress string `json:"mac"` + // IPAddresses is the set of IP addresses assigned to the ENI. + IPAddresses []string `json:"ip-addresses"` + // GatewayIPAddresses is the set of subnet gateway IP addresses for the ENI. + GatewayIPAddresses []string `json:"gateway-ip-addresses"` + // BlockInstanceMetadata specifies if InstanceMetadata endpoint should be blocked. + BlockInstanceMetadata bool `json:"block-instance-metadata"` + // StayDown specifies if the ENI device should be brought up and configured. + StayDown bool `json:"stay-down"` + // DeviceName is the name of the interface will be set inside the namespace + // this was used as a parameter of the libcni, which is not part of the plugin + // configuration, thus no need to marshal + DeviceName string `json:"-"` + // MTU is the mtu of the eni that should be set if not default value + MTU int `json:"mtu"` +} + +func NewENIConfig( + cniConfig CNIConfig, + eni *networkinterface.NetworkInterface, + blockInstanceMetadata bool, + stayDown bool, + mtu int, +) *ENIConfig { + return &ENIConfig{ + CNIConfig: cniConfig, + ENIID: eni.ID, + MACAddress: eni.MacAddress, + IPAddresses: eni.GetIPAddressesWithPrefixLength(), + GatewayIPAddresses: []string{eni.GetSubnetGatewayIPv4Address()}, + BlockInstanceMetadata: blockInstanceMetadata, + StayDown: stayDown, + DeviceName: eni.DeviceName, + MTU: mtu, + } +} + +func (ec *ENIConfig) String() string { + return fmt.Sprintf("%s, eni: %s, mac: %s, ipAddrs: %v, gateways: %v,"+ + " blockIMDS: %v, stay-down: %t, mtu: %d", + ec.CNIConfig.String(), ec.ENIID, ec.MACAddress, ec.IPAddresses, ec.GatewayIPAddresses, + ec.BlockInstanceMetadata, ec.StayDown, ec.MTU) +} + +func (ec *ENIConfig) NSPath() string { + return ec.NetNSPath +} + +func (ec *ENIConfig) InterfaceName() string { + if ec.DeviceName == "" { + return defaultENIName + } + return ec.DeviceName +} + +func (ec *ENIConfig) CNIVersion() string { + return ec.CNISpecVersion +} + +func (ec *ENIConfig) PluginName() string { + return ec.CNIPluginName +} diff --git a/ecs-agent/netlib/model/ecscni/generate_mocks_linux.go b/ecs-agent/netlib/model/ecscni/generate_mocks_linux.go new file mode 100644 index 00000000000..179ff25f5d3 --- /dev/null +++ b/ecs-agent/netlib/model/ecscni/generate_mocks_linux.go @@ -0,0 +1,6 @@ +//go:build !windows +// +build !windows + +package ecscni + +//go:generate mockgen -build_flags=--mod=mod 
-destination=mocks/ecscni_mocks_linux.go github.com/aws/amazon-ecs-agent/ecs-agent/netlib/model/ecscni Config,NetNSUtil,CNI
diff --git a/ecs-agent/netlib/model/ecscni/generate_mocks_windows.go b/ecs-agent/netlib/model/ecscni/generate_mocks_windows.go
new file mode 100644
index 00000000000..509f5eb4a63
--- /dev/null
+++ b/ecs-agent/netlib/model/ecscni/generate_mocks_windows.go
@@ -0,0 +1,6 @@
+//go:build windows
+// +build windows
+
+package ecscni
+
+//go:generate mockgen -build_flags=--mod=mod -destination=mocks/ecscni_mocks_windows.go github.com/aws/amazon-ecs-agent/ecs-agent/netlib/model/ecscni Config,NetNSUtil,CNI
diff --git a/ecs-agent/netlib/model/ecscni/ipam_config.go b/ecs-agent/netlib/model/ecscni/ipam_config.go
new file mode 100644
index 00000000000..78477f946e8
--- /dev/null
+++ b/ecs-agent/netlib/model/ecscni/ipam_config.go
@@ -0,0 +1,43 @@
+package ecscni
+
+import (
+	"fmt"
+
+	"github.com/containernetworking/cni/pkg/types"
+)
+
+// IPAMConfig defines the configuration required for the ipam plugin
+type IPAMConfig struct {
+	CNIConfig
+	// IPV4Subnet is the IP address range managed by ipam
+	IPV4Subnet string `json:"ipv4-subnet,omitempty"`
+	// IPV4Address is the IP address to deal with (assign or release) in ipam
+	IPV4Address string `json:"ipv4-address,omitempty"`
+	// IPV4Gateway is the gateway returned by ipam; by default it is the '.1' address of the subnet
+	IPV4Gateway string `json:"ipv4-gateway,omitempty"`
+	// IPV4Routes is the list of routes to be added in the container namespace
+	IPV4Routes []*types.Route `json:"ipv4-routes,omitempty"`
+	// ID is the key stored with the assigned IP in ipam
+	ID string `json:"id"`
+}
+
+func (ic *IPAMConfig) String() string {
+	return fmt.Sprintf("%s, subnet: %s, ip: %s, gw: %s, route: %s",
+		ic.CNIConfig.String(), ic.IPV4Subnet, ic.IPV4Address, ic.IPV4Gateway, ic.IPV4Routes)
+}
+
+func (ic *IPAMConfig) InterfaceName() string {
+	return "none"
+}
+
+func (ic *IPAMConfig) NSPath() string {
+	return ic.NetNSPath
+}
+
+func (ic *IPAMConfig) CNIVersion() string {
+	return ic.CNISpecVersion
+}
+
+func (ic *IPAMConfig) PluginName() string {
+	return ic.CNIPluginName
+}
diff --git a/ecs-agent/netlib/model/ecscni/netconfig.go b/ecs-agent/netlib/model/ecscni/netconfig.go
new file mode 100644
index 00000000000..d1ae7df3f0e
--- /dev/null
+++ b/ecs-agent/netlib/model/ecscni/netconfig.go
@@ -0,0 +1,96 @@
+package ecscni
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/containernetworking/cni/libcni"
+	"github.com/containernetworking/cni/pkg/types"
+	"github.com/pkg/errors"
+)
+
+const (
+	defaultInterfaceName = "eth0"
+	defaultENIName       = "eth1"
+
+	PluginLogPath = "/var/log/ecs/ecs-cni-warmpool.log"
+)
+
+// PluginConfig is the general interface for a plugin's configuration
+type PluginConfig interface {
+	// String returns the human-readable information of the configuration
+	String() string
+	// InterfaceName returns the name of the interface to be configured
+	InterfaceName() string
+	// NSPath returns the path of the network namespace
+	NSPath() string
+	// PluginName returns the name of the plugin
+	PluginName() string
+	// CNIVersion returns the version of the cni spec
+	CNIVersion() string
+	// NetworkName returns the network name to be used by the CNI plugin during network creation.
+	// NetworkName is part of the network configuration required as per the CNI specifications.
+	// https://github.com/containernetworking/cni/blob/master/SPEC.md
+	NetworkName() string
+	// ContainerID returns a plaintext identifier for a container.
In our case we do not make use + // of this field, although it is required to include a non-empty value for it since the + // CNI framework enforces it. + ContainerID() string +} + +// CNIConfig defines the runtime configuration for invoking the plugin +type CNIConfig struct { + NetNSPath string `json:"-"` + CNISpecVersion string `json:"cniVersion"` + CNIPluginName string `json:"type"` +} + +func (cc *CNIConfig) String() string { + return fmt.Sprintf("ns: %s, version: %s, plugin: %s", + cc.NetNSPath, cc.CNISpecVersion, cc.CNIPluginName) +} + +// ContainerID returns a plaintext identifier for a container. In our case we do not make use +// of this field, although it is required to include a non-empty value for it since the +// CNI framework enforces it. Hence we return a fixed string. +func (cc *CNIConfig) ContainerID() string { + return "container-id" +} + +// NetworkName returns a plaintext identifier which should be unique across all network +// configurations on a host (or other administrative domain). In our case we do not make use +// of this field, although it is required to include a non-empty value for it since the +// CNI framework enforces it. Hence we return a fixed string. +func (cc *CNIConfig) NetworkName() string { + return "network-name" +} + +// BuildNetworkConfig constructs the network configuration follow the format of libcni +func BuildNetworkConfig(cfg PluginConfig) (*libcni.NetworkConfig, error) { + nc := &types.NetConf{ + Type: cfg.PluginName(), + CNIVersion: cfg.CNIVersion(), + Name: cfg.NetworkName(), + } + + data, err := json.Marshal(cfg) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal the plugin configuration") + } + + netconfig := &libcni.NetworkConfig{ + Network: nc, + Bytes: data, + } + + return netconfig, nil +} + +// BuildRuntimeConfig constructs the runtime configuration following the format of libcni. 
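+//
+// A minimal usage sketch of how the helpers in this file are meant to be combined with the
+// CNI client in this package; the plugin directory, plugin name and spec version below are
+// illustrative values, not defaults defined by this change:
+//
+//	client := NewCNIClient([]string{"/path/to/cni/plugins"})
+//	cfg := &BridgeConfig{
+//		CNIConfig: CNIConfig{
+//			NetNSPath:      "/var/run/netns/task-ns",
+//			CNISpecVersion: "0.3.1",
+//			CNIPluginName:  "ecs-bridge",
+//		},
+//		Name: "ecs-br0",
+//	}
+//	res, err := client.Add(ctx, cfg) // Add builds the network and runtime configs internally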
+func BuildRuntimeConfig(cfg PluginConfig) *libcni.RuntimeConf {
+	return &libcni.RuntimeConf{
+		NetNS:       cfg.NSPath(),
+		IfName:      cfg.InterfaceName(),
+		ContainerID: cfg.ContainerID(),
+	}
+}
diff --git a/ecs-agent/netlib/model/ecscni/netconfig_test.go b/ecs-agent/netlib/model/ecscni/netconfig_test.go
new file mode 100644
index 00000000000..3e2d2dba0a6
--- /dev/null
+++ b/ecs-agent/netlib/model/ecscni/netconfig_test.go
@@ -0,0 +1,24 @@
+//go:build unit
+// +build unit
+
+package ecscni
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestBuildNetworkConfig(t *testing.T) {
+	nsPath := "nspath"
+	cfg := &VPCTunnelConfig{
+		CNIConfig: CNIConfig{
+			NetNSPath: nsPath,
+		},
+	}
+
+	rt := BuildRuntimeConfig(cfg)
+	require.Equal(t, nsPath, rt.NetNS)
+	require.Equal(t, defaultInterfaceName, rt.IfName)
+	require.Equal(t, "container-id", rt.ContainerID)
+}
diff --git a/ecs-agent/netlib/model/ecscni/nsutil_linux.go b/ecs-agent/netlib/model/ecscni/nsutil_linux.go
new file mode 100644
index 00000000000..7d920792782
--- /dev/null
+++ b/ecs-agent/netlib/model/ecscni/nsutil_linux.go
@@ -0,0 +1,141 @@
+//go:build !windows
+// +build !windows
+
+package ecscni
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"syscall"
+
+	cnins "github.com/containernetworking/plugins/pkg/ns"
+	"github.com/pkg/errors"
+)
+
+const (
+	// dnsNameServerFormat defines the nameserver entry in the resolv.conf file
+	dnsNameServerFormat = "nameserver %s\n"
+	// dnsSearchDomainFormat defines the search entry in the resolv.conf file
+	dnsSearchDomainFormat = "search %s\n"
+)
+
+// NetNSUtil provides some basic methods for the agent to deal with network namespaces
+type NetNSUtil interface {
+	// NewNetNS creates a new network namespace in the system
+	NewNetNS(nsPath string) error
+	// DelNetNS deletes the network namespace from the system
+	DelNetNS(nsPath string) error
+	// GetNetNSPath creates the network namespace path from the named namespace
+	GetNetNSPath(nsName string) string
+	// GetNetNSName extracts the ns name from the netns path
+	GetNetNSName(nsPath string) string
+	// NSExists checks if the given ns path exists or not
+	NSExists(nsPath string) (bool, error)
+	// ExecInNSPath invokes the function in the given network namespace
+	ExecInNSPath(nsPath string, cb func(cnins.NetNS) error) error
+	// BuildResolvConfig constructs the content of the dns configuration file resolv.conf
+	BuildResolvConfig(nameservers, searchDomains []string) string
+}
+
+type netnsutil struct {
+}
+
+func NewNetNSUtil() NetNSUtil {
+	return &netnsutil{}
+}
+
+// NewNetNS creates a new network namespace with the given path
+func (*netnsutil) NewNetNS(nspath string) error {
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	currentNS, err := cnins.GetCurrentNS()
+	if err != nil {
+		return errors.Wrap(err, "unable to get the current network namespace")
+	}
+
+	_, err = os.Stat(NETNS_PATH_DEFAULT)
+	if err != nil {
+		// Create the default network namespace directory path if it does not exist
+		if os.IsNotExist(err) {
+			err = os.Mkdir(NETNS_PATH_DEFAULT, nsFileMode)
+		}
+		if err != nil {
+			return errors.Wrap(err, "unable to get status of default ns directory")
+		}
+	}
+
+	// Create a new network namespace
+	err = syscall.Unshare(syscall.CLONE_NEWNET)
+	if err != nil {
+		return errors.Wrap(err, "unable to create new ns with unshare")
+	}
+
+	// Make this network namespace persistent
+	f, err := os.OpenFile(nspath, os.O_CREATE|os.O_EXCL, nsFileMode)
+	if err != nil {
+		return errors.Wrap(err, "unable to create ns file")
+	}
+
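+	// The empty file created above serves only as a mount point: the bind mount below attaches
+	// this thread's /proc/<pid>/task/<tid>/ns/net to it, which keeps the namespace created by
+	// Unshare alive (persistent) after the locked OS thread is released.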
f.Close() + + nsPath := fmt.Sprintf(NETNS_PROC_FORMAT, os.Getpid(), syscall.Gettid()) + if err = syscall.Mount(nsPath, nspath, "", syscall.MS_BIND|syscall.MS_REC, ""); err != nil { + return errors.Wrap(err, "unable to mount the ns path") + } + + return currentNS.Set() +} + +// DelNetNS remove the given network namespace +func (*netnsutil) DelNetNS(nspath string) error { + if err := syscall.Unmount(nspath, syscall.MNT_DETACH); err != nil { + return errors.Wrap(err, "unable to unmount the ns path") + } + + if err := syscall.Unlink(nspath); err != nil { + return errors.Wrap(err, "unable to del the ns file") + } + return nil +} + +// GetNetNSPath returns the full path for the given named network namespace +func (*netnsutil) GetNetNSPath(name string) string { + return filepath.Join(NETNS_PATH_DEFAULT, name) +} + +func (*netnsutil) GetNetNSName(path string) string { + return filepath.Base(path) +} + +// NSExists checks if the given namespace exists +func (nu *netnsutil) NSExists(nspath string) (bool, error) { + stat := &syscall.Statfs_t{} + err := syscall.Statfs(nspath, stat) + if os.IsNotExist(err) { + return false, nil + } + + return true, errors.Wrap(err, "unable to get the status of ns file") +} + +func (nu *netnsutil) ExecInNSPath(netNSPath string, toRun func(cnins.NetNS) error) error { + return cnins.WithNetNSPath(netNSPath, toRun) +} + +// BuildResolvConfig constructs the content of dns configuration file resolv.conf +func (nu *netnsutil) BuildResolvConfig(nameservers, searchDomains []string) string { + var bf strings.Builder + for _, nameserver := range nameservers { + bf.WriteString(fmt.Sprintf(dnsNameServerFormat, nameserver)) + } + + if len(searchDomains) != 0 { + searchDomainsList := strings.Join(searchDomains, " ") + bf.WriteString(fmt.Sprintf(dnsSearchDomainFormat, searchDomainsList)) + } + + return bf.String() +} diff --git a/ecs-agent/netlib/model/ecscni/nsutil_windows.go b/ecs-agent/netlib/model/ecscni/nsutil_windows.go new file mode 100644 index 00000000000..da5bd9ff5ee --- /dev/null +++ b/ecs-agent/netlib/model/ecscni/nsutil_windows.go @@ -0,0 +1,90 @@ +//go:build windows +// +build windows + +package ecscni + +import ( + "runtime" + + "github.com/aws/amazon-ecs-agent/ecs-agent/utils/uuid" + + "github.com/Microsoft/hcsshim/hcn" + "github.com/pkg/errors" +) + +// NetNSUtil provides some basic methods for performing network namespace related operations. +type NetNSUtil interface { + // NewNetNS creates a new network namespace in the system. + NewNetNS(netNSID string) error + // DelNetNS deletes the network namespace from the system. + DelNetNS(netNSID string) error + // NewNetNSID generates a HCN Namespace ID. + NewNetNSID() string + // NSExists checks if the given namespace exists or not. + NSExists(netNSID string) (bool, error) +} + +type nsUtil struct{} + +// NewNetNSUtil creates a new instance of NetNSUtil. +func NewNetNSUtil() NetNSUtil { + return &nsUtil{} +} + +// NewNetNS creates a new network namespace with the given namespace id. +func (*nsUtil) NewNetNS(netNSID string) error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + hcnNetNs := &hcn.HostComputeNamespace{ + Id: netNSID, + SchemaVersion: hcn.V2SchemaVersion(), + } + + _, err := hcnNetNs.Create() + if err != nil { + return errors.Wrapf(err, "unable to create hcn namespace") + } + + return nil +} + +// DelNetNS removes the network namespace with the given namespace id. 
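+// The lookup below goes through HCN, so the namespace must have been created via NewNetNS
+// (or otherwise be registered with HCN) for the deletion to succeed.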
+func (*nsUtil) DelNetNS(netNSID string) error {
+	hcnNetNs, err := hcn.GetNamespaceByID(netNSID)
+	if err != nil {
+		// The possible reasons for HCN not being able to find the specified network namespace are:
+		// 1. Duplicate deletion calls are invoked for the same namespace.
+		// 2. DelNetNS is invoked before NewNetNS.
+		// 3. Error from HCN while trying to find the namespace.
+		return errors.Wrapf(err, "unable to find hcn namespace")
+	}
+
+	err = hcnNetNs.Delete()
+	if err != nil {
+		return errors.Wrapf(err, "unable to delete hcn namespace")
+	}
+
+	return nil
+}
+
+// NewNetNSID generates a HCN Namespace ID.
+func (*nsUtil) NewNetNSID() string {
+	return uuid.GenerateWithPrefix("", "")
+}
+
+// NSExists checks if any namespace exists with the given namespace id.
+func (*nsUtil) NSExists(netNSID string) (bool, error) {
+	_, err := hcn.GetNamespaceByID(netNSID)
+	if err != nil {
+		// If the HCN resource was not found then the namespace doesn't exist, so return false.
+		// Otherwise there was an error in the HCN request, so return an error.
+		if hcn.IsNotFoundError(err) {
+			return false, nil
+		} else {
+			return false, errors.Wrap(err, "unable to get the status of ns")
+		}
+	}
+
+	return true, nil
+}
diff --git a/ecs-agent/netlib/model/ecscni/plugin.go b/ecs-agent/netlib/model/ecscni/plugin.go
new file mode 100644
index 00000000000..c7d19f8447b
--- /dev/null
+++ b/ecs-agent/netlib/model/ecscni/plugin.go
@@ -0,0 +1,31 @@
+package ecscni
+
+const (
+	NETNS_PATH_DEFAULT = "/var/run/netns"
+	NETNS_PROC_FORMAT  = "/proc/%d/task/%d/ns/net"
+
+	nsFileMode = 0444
+)
+
+// Config is a general interface that represents all kinds of plugin configs
+type Config interface {
+	String() string
+}
+
+// CNIPluginVersion is used to convert the JSON output of the
+// '--version' command into a string
+type CNIPluginVersion struct {
+	Version string `json:"version"`
+	Dirty   bool   `json:"dirty"`
+	Hash    string `json:"gitShortHash"`
+}
+
+// String returns the version information as a formatted string
+func (v *CNIPluginVersion) String() string {
+	ver := ""
+	if v.Dirty {
+		ver = "@"
+	}
+
+	return ver + v.Hash + "-" + v.Version
+}
diff --git a/ecs-agent/netlib/model/ecscni/serviceconnect_config.go b/ecs-agent/netlib/model/ecscni/serviceconnect_config.go
new file mode 100644
index 00000000000..8d75b3a9583
--- /dev/null
+++ b/ecs-agent/netlib/model/ecscni/serviceconnect_config.go
@@ -0,0 +1,103 @@
+package ecscni
+
+import (
+	"fmt"
+
+	"github.com/aws/amazon-ecs-agent/ecs-agent/netlib/model/serviceconnect"
+)
+
+const redirectModeNat string = "nat"
+
+type ServiceConnectCNIConfig struct {
+	CNIConfig
+	// IngressConfig (optional) specifies the netfilter rules to be set for incoming requests.
+	IngressConfig []IngressConfig `json:"ingressConfig,omitempty"`
+	// EgressConfig (optional) specifies the netfilter rules to be set for outgoing requests.
+	EgressConfig EgressConfig `json:"egressConfig,omitempty"`
+	// EnableIPv4 (optional) specifies whether to set the rules in the IPv4 table. Default value is false.
+	EnableIPv4 bool `json:"enableIPv4,omitempty"`
+	// EnableIPv6 (optional) specifies whether to set the rules in the IPv6 table. Default value is false.
+	EnableIPv6 bool `json:"enableIPv6,omitempty"`
+}
+
+// IngressConfig defines the ingress network config in JSON format for the ecs-serviceconnect CNI plugin.
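+// One entry is generated for every ingress listener in the task's service connect configuration.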
+type IngressConfig struct { + ListenerPort int64 `json:"listenerPort"` + InterceptPort int64 `json:"interceptPort,omitempty"` +} + +// EgressConfig defines the egress network config in JSON format for the ecs-serviceconnect CNI plugin. +type EgressConfig struct { + ListenerPort int64 `json:"listenerPort"` + VIP vipConfig `json:"vip"` + // RedirectMode dictates what mechanism the plugin should use for redirecting egress traffic. + // For awsvpc mode the value is "nat" always. + RedirectMode string `json:"redirectMode"` +} + +// vipConfig defines the EgressVIP network config in JSON format for the ecs-serviceconnect CNI plugin. +type vipConfig struct { + IPv4CIDR string `json:"ipv4Cidr,omitempty"` + IPv6CIDR string `json:"ipv6Cidr,omitempty"` +} + +func NewServiceConnectCNIConfig( + cniConfig CNIConfig, + scConfig *serviceconnect.ServiceConnectConfig, + enableIPV4 bool, + enableIPV6 bool, +) *ServiceConnectCNIConfig { + var cniIngress []IngressConfig + for _, scIngress := range scConfig.IngressConfigList { + cniIngress = append(cniIngress, IngressConfig{ + ListenerPort: scIngress.ListenerPort, + InterceptPort: scIngress.InterceptPort, + }) + } + + var vip vipConfig + if scConfig.EgressConfig.ListenerName != "" { + vip.IPv6CIDR = scConfig.EgressConfig.IPV6CIDR + vip.IPv4CIDR = scConfig.EgressConfig.IPV4CIDR + } + + return &ServiceConnectCNIConfig{ + CNIConfig: cniConfig, + IngressConfig: cniIngress, + EgressConfig: EgressConfig{ + ListenerPort: scConfig.EgressConfig.ListenerPort, + VIP: vip, + RedirectMode: redirectModeNat, + }, + EnableIPv4: enableIPV4, + EnableIPv6: enableIPV6, + } +} + +func (sc *ServiceConnectCNIConfig) String() string { + return fmt.Sprintf("%s, ingressConfig: %v, egressConfig: %v, enableIPv4: %v, enableIPv6: %v", + sc.CNIConfig.String(), + sc.IngressConfig, + sc.EgressConfig, + sc.EnableIPv4, + sc.EnableIPv6) +} + +func (sc *ServiceConnectCNIConfig) InterfaceName() string { + // Not required for service connect plugin as no particular interface is set up in this + // plugin. The plugin sets up some iptables filters, that's all. However, CNI requires + // us to set it with a non-empty string. 
+	return "eth0"
+}
+
+func (sc *ServiceConnectCNIConfig) NSPath() string {
+	return sc.NetNSPath
+}
+
+func (sc *ServiceConnectCNIConfig) PluginName() string {
+	return sc.CNIPluginName
+}
+
+func (sc *ServiceConnectCNIConfig) CNIVersion() string {
+	return sc.CNISpecVersion
+}
diff --git a/ecs-agent/netlib/model/ecscni/vpcbranch_config.go b/ecs-agent/netlib/model/ecscni/vpcbranch_config.go
new file mode 100644
index 00000000000..b7722f35028
--- /dev/null
+++ b/ecs-agent/netlib/model/ecscni/vpcbranch_config.go
@@ -0,0 +1,52 @@
+package ecscni
+
+import (
+	"fmt"
+)
+
+// VPCBranchENIConfig defines the configuration for the vpc-branch-eni plugin
+type VPCBranchENIConfig struct {
+	CNIConfig
+	TrunkName          string   `json:"trunkName"`
+	TrunkMACAddress    string   `json:"trunkMACAddress"`
+	BranchVlanID       string   `json:"branchVlanID"`
+	BranchMACAddress   string   `json:"branchMACAddress"`
+	IPAddresses        []string `json:"ipAddresses"`
+	GatewayIPAddresses []string `json:"gatewayIPAddresses"`
+	BlockIMDS          bool     `json:"blockInstanceMetadata"`
+	InterfaceType      string   `json:"interfaceType"`
+	UID                string   `json:"uid"`
+	GID                string   `json:"gid"`
+
+	// This is used as a parameter to libcni; it is not part of the plugin
+	// configuration, so it does not need to be marshaled
+	IfName string `json:"_"`
+}
+
+func (c *VPCBranchENIConfig) String() string {
+	return fmt.Sprintf("%s, trunk: %s, trunkMAC: %s, branchMAC: %s "+
+		"ipAddrs: %v, gateways: %v, vlanID: %s, uid: %s, gid: %s, interfaceType: %s",
+		c.CNIConfig.String(), c.TrunkName,
+		c.TrunkMACAddress, c.BranchMACAddress, c.IPAddresses, c.GatewayIPAddresses,
+		c.BranchVlanID, c.UID, c.GID, c.InterfaceType)
+}
+
+func (c *VPCBranchENIConfig) InterfaceName() string {
+	if c.IfName != "" {
+		return c.IfName
+	}
+
+	return defaultInterfaceName
+}
+
+func (c *VPCBranchENIConfig) NSPath() string {
+	return c.NetNSPath
+}
+
+func (c *VPCBranchENIConfig) CNIVersion() string {
+	return c.CNISpecVersion
+}
+
+func (c *VPCBranchENIConfig) PluginName() string {
+	return c.CNIPluginName
+}
diff --git a/ecs-agent/netlib/model/ecscni/vpceni_config.go b/ecs-agent/netlib/model/ecscni/vpceni_config.go
new file mode 100644
index 00000000000..35a7b9e52ed
--- /dev/null
+++ b/ecs-agent/netlib/model/ecscni/vpceni_config.go
@@ -0,0 +1,61 @@
+package ecscni
+
+import (
+	"fmt"
+
+	"github.com/containernetworking/cni/pkg/types"
+)
+
+// VPCENIConfig contains all the information required to invoke the vpc-eni plugin.
+type VPCENIConfig struct {
+	CNIConfig
+	// Name is the network name to be used in network configuration.
+	Name string `json:"name"`
+	// DNS is used to pass DNS information to the plugin.
+	DNS types.DNS `json:"dns"`
+	// ENIName is the device name of the eni on the instance.
+	ENIName string `json:"eniName"`
+	// ENIMACAddress is the MAC address of the eni.
+	ENIMACAddress string `json:"eniMACAddress"`
+	// ENIIPAddresses is the list of IPv4 addresses of the eni.
+	ENIIPAddresses []string `json:"eniIPAddresses"`
+	// GatewayIPAddresses specifies the IPv4 addresses of the subnet gateway for the eni.
+	GatewayIPAddresses []string `json:"gatewayIPAddresses"`
+	// UseExistingNetwork specifies whether an existing network should be used instead of creating a new one.
+	// For Task IAM roles, a pre-existing HNS network is available from which the HNS endpoint should be created.
+	// This field specifies that an existing network with the provided name should be used during the network setup by the plugin.
+ UseExistingNetwork bool `json:"useExistingNetwork"` + // BlockIMDS specified if the instance metadata endpoint should be blocked for the tasks. + BlockIMDS bool `json:"blockInstanceMetadata"` +} + +func (ec *VPCENIConfig) String() string { + return fmt.Sprintf("%s, eni: %s, mac: %s, ipAddrs: %v, gateways: %v, "+ + "useExistingNetwork: %t, BlockIMDS: %t, dns: %v", + ec.CNIConfig.String(), ec.ENIName, ec.ENIMACAddress, ec.ENIIPAddresses, + ec.GatewayIPAddresses, ec.UseExistingNetwork, ec.BlockIMDS, ec.DNS) +} + +// InterfaceName returns the veth pair name will be used inside the namespace. +// For this plugin, interface name is redundant and would be generated in the plugin itself. +func (ec *VPCENIConfig) InterfaceName() string { + return defaultInterfaceName +} + +func (ec *VPCENIConfig) NSPath() string { + return ec.NetNSPath +} + +func (ec *VPCENIConfig) CNIVersion() string { + return ec.CNISpecVersion +} + +func (ec *VPCENIConfig) PluginName() string { + return ec.CNIPluginName +} + +func (ec *VPCENIConfig) NetworkName() string { + // We are using this field in the Windows CNI plugin while generating a network name/endpoint name. + // It is also used to find pre-existing network while setting up fargate-bridge for Task IAM roles. + return ec.Name +} diff --git a/ecs-agent/netlib/model/ecscni/vpctunnel_config.go b/ecs-agent/netlib/model/ecscni/vpctunnel_config.go new file mode 100644 index 00000000000..47ae9645b7c --- /dev/null +++ b/ecs-agent/netlib/model/ecscni/vpctunnel_config.go @@ -0,0 +1,52 @@ +package ecscni + +import ( + "fmt" +) + +// VPCTunnelConfig defines the configuration for vpc-tunnel plugin. This struct will +// be serialized and included as parameter while executing the CNI plugin. +type VPCTunnelConfig struct { + CNIConfig + DestinationIPAddress string `json:"destinationIPAddress"` + VNI string `json:"vni"` + DestinationPort string `json:"destinationPort"` + Primary bool `json:"primary"` + IPAddresses []string `json:"ipAddresses"` + GatewayIPAddress string `json:"gatewayIPAddress"` + InterfaceType string `json:"interfaceType"` + UID string `json:"uid"` + GID string `json:"gid"` + + // this was used as a parameter of the libcni, which is not part of the plugin + // configuration, thus no need to marshal + IfName string `json:"_"` +} + +func (c *VPCTunnelConfig) String() string { + return fmt.Sprintf("%s, destinationIPAddress: %s, vni: %s, destinationPort: %s, "+ + "ipAddrs: %v, gateways: %v, uid: %s, gid: %s, interfaceType: %s, primary: %v, ifName: %s", + c.CNIConfig.String(), c.DestinationIPAddress, + c.VNI, c.DestinationPort, c.IPAddresses, c.GatewayIPAddress, + c.UID, c.GID, c.InterfaceType, c.Primary, c.IfName) +} + +func (c *VPCTunnelConfig) InterfaceName() string { + if c.IfName != "" { + return c.IfName + } + + return defaultInterfaceName +} + +func (c *VPCTunnelConfig) NSPath() string { + return c.NetNSPath +} + +func (c *VPCTunnelConfig) CNIVersion() string { + return c.CNISpecVersion +} + +func (c *VPCTunnelConfig) PluginName() string { + return c.CNIPluginName +} diff --git a/ecs-agent/netlib/model/serviceconnect/serviceconnect.go b/ecs-agent/netlib/model/serviceconnect/serviceconnect.go new file mode 100644 index 00000000000..6b8c09beabf --- /dev/null +++ b/ecs-agent/netlib/model/serviceconnect/serviceconnect.go @@ -0,0 +1,44 @@ +package serviceconnect + +import ( + "github.com/aws/amazon-ecs-agent/ecs-agent/netlib/model/networkinterface" +) + +// IngressConfig holds inbound listener details. 
For each endpoint exposed by a service connect task, +// there will be one inbound listener configured in the service connect container (to be precise, in the envoy proxy). +type IngressConfig struct { + // ListenerPort is the port to which the envoy proxy will bind to. + ListenerPort int64 + // InterceptPort is the port exposed by the application container. + InterceptPort int64 + // ListenerName is the internal name used by AppNet for the listener. + ListenerName string +} + +// EgressConfig holds outbound listener details. There will be one outbound listener in each +// service connect container (to be precise, in the envoy proxy). All service connect enabled traffic +// from task application containers should pass through the outbound listener. +type EgressConfig struct { + // ListenerName is the internal name used by AppNet for the listener. + ListenerName string + // CIDR range used to identify outbound service connect traffic. + IPV4CIDR string + IPV6CIDR string + // ListenerPort is the port to which all traffic addressed to the CIDR range + // will be redirected to. This port is never specified in the SC payload and + // will always be generated by Fargate agent. + ListenerPort int64 +} + +// ServiceConnectConfig will contain all service connect specific data associated with a particular task. +type ServiceConnectConfig struct { + IngressConfigList []IngressConfig + EgressConfig EgressConfig + DNSMappingList []networkinterface.DNSMapping + + // StatsEndpoint is the path to the http endpoint from which service connect stats data is collected. + StatsEndpoint string + // ServiceConnectContainerName is the name of the special side container included in + // service connect enabled tasks. + ServiceConnectContainerName string +} diff --git a/ecs-agent/utils/uuid/uuid.go b/ecs-agent/utils/uuid/uuid.go new file mode 100644 index 00000000000..886715b5015 --- /dev/null +++ b/ecs-agent/utils/uuid/uuid.go @@ -0,0 +1,19 @@ +package uuid + +import ( + "fmt" + + "github.com/google/uuid" +) + +const ( + DefaultSeparator = "-" +) + +// GenerateWithPrefix returns a unique string with the provided prefix and separator. 
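+// If the prefix is empty, the separator is omitted and a bare UUID is returned.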
+func GenerateWithPrefix(prefix string, separator string) string { + if len(prefix) > 0 { + return fmt.Sprintf("%s%s%s", prefix, separator, uuid.New().String()) + } + return uuid.New().String() +} diff --git a/ecs-agent/utils/uuid/uuid_test.go b/ecs-agent/utils/uuid/uuid_test.go new file mode 100644 index 00000000000..dc80865bd92 --- /dev/null +++ b/ecs-agent/utils/uuid/uuid_test.go @@ -0,0 +1,48 @@ +//go:build unit +// +build unit + +package uuid + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGenerateUuidWithPrefix(t *testing.T) { + for _, testCase := range []struct { + prefix string + separator string + expectedPrefix string + }{ + { + prefix: "prefix", + separator: DefaultSeparator, + expectedPrefix: "prefix-", + }, + { + prefix: "prefix", + separator: "+", + expectedPrefix: "prefix+", + }, + { + prefix: "prefix", + separator: "", + expectedPrefix: "prefix", + }, + { + prefix: "", + separator: DefaultSeparator, + expectedPrefix: "", + }, + { + prefix: "", + separator: "", + expectedPrefix: "", + }, + } { + generatedUuid := GenerateWithPrefix(testCase.prefix, testCase.separator) + assert.True(t, strings.HasPrefix(generatedUuid, testCase.expectedPrefix)) + } +} diff --git a/ecs-agent/vendor/github.com/Microsoft/go-winio/.gitignore b/ecs-agent/vendor/github.com/Microsoft/go-winio/.gitignore new file mode 100644 index 00000000000..b883f1fdc6d --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/go-winio/.gitignore @@ -0,0 +1 @@ +*.exe diff --git a/ecs-agent/vendor/github.com/Microsoft/go-winio/CODEOWNERS b/ecs-agent/vendor/github.com/Microsoft/go-winio/CODEOWNERS new file mode 100644 index 00000000000..ae1b4942b91 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/go-winio/CODEOWNERS @@ -0,0 +1 @@ + * @microsoft/containerplat diff --git a/ecs-agent/vendor/github.com/Microsoft/go-winio/LICENSE b/ecs-agent/vendor/github.com/Microsoft/go-winio/LICENSE new file mode 100644 index 00000000000..b8b569d7746 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/go-winio/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/ecs-agent/vendor/github.com/Microsoft/go-winio/README.md b/ecs-agent/vendor/github.com/Microsoft/go-winio/README.md new file mode 100644 index 00000000000..683be1dcf9c --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/go-winio/README.md @@ -0,0 +1,37 @@ +# go-winio [![Build Status](https://github.com/microsoft/go-winio/actions/workflows/ci.yml/badge.svg)](https://github.com/microsoft/go-winio/actions/workflows/ci.yml) + +This repository contains utilities for efficiently performing Win32 IO operations in +Go. Currently, this is focused on accessing named pipes and other file handles, and +for using named pipes as a net transport. + +This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go +to reuse the thread to schedule another goroutine. This limits support to Windows Vista and +newer operating systems. This is similar to the implementation of network sockets in Go's net +package. + +Please see the LICENSE file for licensing information. + +## Contributing + +This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) +declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR +appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. + +We also require that contributors sign their commits using git commit -s or git commit --signoff to certify they either authored the work themselves +or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for more info, as well as to make sure that you can +attest to the rules listed. Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off. + + +## Code of Conduct + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or +contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + + + +## Special Thanks +Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe +for another named pipe implementation. 
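The README above describes using named pipes as a net transport. The following is a minimal, illustrative sketch (not part of this patch) of that pattern, using the vendored library's ListenPipe and DialPipe with a hypothetical pipe name; it is Windows-only, and error handling is trimmed for brevity.

// Editorial sketch, assuming a hypothetical pipe name; not part of the patch.
package main

import (
	"fmt"
	"io"
	"time"

	"github.com/Microsoft/go-winio"
)

func main() {
	pipePath := `\\.\pipe\ecs-example` // hypothetical pipe name

	// Serve: accept one connection and echo everything back.
	l, err := winio.ListenPipe(pipePath, nil)
	if err != nil {
		panic(err)
	}
	defer l.Close()
	go func() {
		conn, err := l.Accept()
		if err != nil {
			return
		}
		defer conn.Close()
		io.Copy(conn, conn)
	}()

	// Dial with a timeout, then use the pipe as an ordinary net.Conn.
	timeout := 2 * time.Second
	conn, err := winio.DialPipe(pipePath, &timeout)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	fmt.Fprintln(conn, "hello over the pipe")
}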
diff --git a/ecs-agent/vendor/github.com/Microsoft/go-winio/backup.go b/ecs-agent/vendor/github.com/Microsoft/go-winio/backup.go new file mode 100644 index 00000000000..2be34af4310 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/go-winio/backup.go @@ -0,0 +1,280 @@ +// +build windows + +package winio + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "syscall" + "unicode/utf16" +) + +//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead +//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite + +const ( + BackupData = uint32(iota + 1) + BackupEaData + BackupSecurity + BackupAlternateData + BackupLink + BackupPropertyData + BackupObjectId + BackupReparseData + BackupSparseBlock + BackupTxfsData +) + +const ( + StreamSparseAttributes = uint32(8) +) + +const ( + WRITE_DAC = 0x40000 + WRITE_OWNER = 0x80000 + ACCESS_SYSTEM_SECURITY = 0x1000000 +) + +// BackupHeader represents a backup stream of a file. +type BackupHeader struct { + Id uint32 // The backup stream ID + Attributes uint32 // Stream attributes + Size int64 // The size of the stream in bytes + Name string // The name of the stream (for BackupAlternateData only). + Offset int64 // The offset of the stream in the file (for BackupSparseBlock only). +} + +type win32StreamId struct { + StreamId uint32 + Attributes uint32 + Size uint64 + NameSize uint32 +} + +// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series +// of BackupHeader values. +type BackupStreamReader struct { + r io.Reader + bytesLeft int64 +} + +// NewBackupStreamReader produces a BackupStreamReader from any io.Reader. +func NewBackupStreamReader(r io.Reader) *BackupStreamReader { + return &BackupStreamReader{r, 0} +} + +// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if +// it was not completely read. +func (r *BackupStreamReader) Next() (*BackupHeader, error) { + if r.bytesLeft > 0 { + if s, ok := r.r.(io.Seeker); ok { + // Make sure Seek on io.SeekCurrent sometimes succeeds + // before trying the actual seek. + if _, err := s.Seek(0, io.SeekCurrent); err == nil { + if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil { + return nil, err + } + r.bytesLeft = 0 + } + } + if _, err := io.Copy(ioutil.Discard, r); err != nil { + return nil, err + } + } + var wsi win32StreamId + if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { + return nil, err + } + hdr := &BackupHeader{ + Id: wsi.StreamId, + Attributes: wsi.Attributes, + Size: int64(wsi.Size), + } + if wsi.NameSize != 0 { + name := make([]uint16, int(wsi.NameSize/2)) + if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { + return nil, err + } + hdr.Name = syscall.UTF16ToString(name) + } + if wsi.StreamId == BackupSparseBlock { + if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { + return nil, err + } + hdr.Size -= 8 + } + r.bytesLeft = hdr.Size + return hdr, nil +} + +// Read reads from the current backup stream. 
+func (r *BackupStreamReader) Read(b []byte) (int, error) { + if r.bytesLeft == 0 { + return 0, io.EOF + } + if int64(len(b)) > r.bytesLeft { + b = b[:r.bytesLeft] + } + n, err := r.r.Read(b) + r.bytesLeft -= int64(n) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } else if r.bytesLeft == 0 && err == nil { + err = io.EOF + } + return n, err +} + +// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API. +type BackupStreamWriter struct { + w io.Writer + bytesLeft int64 +} + +// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer. +func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter { + return &BackupStreamWriter{w, 0} +} + +// WriteHeader writes the next backup stream header and prepares for calls to Write(). +func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { + if w.bytesLeft != 0 { + return fmt.Errorf("missing %d bytes", w.bytesLeft) + } + name := utf16.Encode([]rune(hdr.Name)) + wsi := win32StreamId{ + StreamId: hdr.Id, + Attributes: hdr.Attributes, + Size: uint64(hdr.Size), + NameSize: uint32(len(name) * 2), + } + if hdr.Id == BackupSparseBlock { + // Include space for the int64 block offset + wsi.Size += 8 + } + if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil { + return err + } + if len(name) != 0 { + if err := binary.Write(w.w, binary.LittleEndian, name); err != nil { + return err + } + } + if hdr.Id == BackupSparseBlock { + if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil { + return err + } + } + w.bytesLeft = hdr.Size + return nil +} + +// Write writes to the current backup stream. +func (w *BackupStreamWriter) Write(b []byte) (int, error) { + if w.bytesLeft < int64(len(b)) { + return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft) + } + n, err := w.w.Write(b) + w.bytesLeft -= int64(n) + return n, err +} + +// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API. +type BackupFileReader struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true, +// Read will attempt to read the security descriptor of the file. +func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { + r := &BackupFileReader{f, includeSecurity, 0} + return r +} + +// Read reads a backup stream from the file by calling the Win32 API BackupRead(). +func (r *BackupFileReader) Read(b []byte) (int, error) { + var bytesRead uint32 + err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) + if err != nil { + return 0, &os.PathError{"BackupRead", r.f.Name(), err} + } + runtime.KeepAlive(r.f) + if bytesRead == 0 { + return 0, io.EOF + } + return int(bytesRead), nil +} + +// Close frees Win32 resources associated with the BackupFileReader. It does not close +// the underlying file. +func (r *BackupFileReader) Close() error { + if r.ctx != 0 { + backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) + runtime.KeepAlive(r.f) + r.ctx = 0 + } + return nil +} + +// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API. +type BackupFileWriter struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true, +// Write() will attempt to restore the security descriptor from the stream. 
+func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { + w := &BackupFileWriter{f, includeSecurity, 0} + return w +} + +// Write restores a portion of the file using the provided backup stream. +func (w *BackupFileWriter) Write(b []byte) (int, error) { + var bytesWritten uint32 + err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) + if err != nil { + return 0, &os.PathError{"BackupWrite", w.f.Name(), err} + } + runtime.KeepAlive(w.f) + if int(bytesWritten) != len(b) { + return int(bytesWritten), errors.New("not all bytes could be written") + } + return len(b), nil +} + +// Close frees Win32 resources associated with the BackupFileWriter. It does not +// close the underlying file. +func (w *BackupFileWriter) Close() error { + if w.ctx != 0 { + backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) + runtime.KeepAlive(w.f) + w.ctx = 0 + } + return nil +} + +// OpenForBackup opens a file or directory, potentially skipping access checks if the backup +// or restore privileges have been acquired. +// +// If the file opened was a directory, it cannot be used with Readdir(). +func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) { + winPath, err := syscall.UTF16FromString(path) + if err != nil { + return nil, err + } + h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0) + if err != nil { + err = &os.PathError{Op: "open", Path: path, Err: err} + return nil, err + } + return os.NewFile(uintptr(h), path), nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/go-winio/ea.go b/ecs-agent/vendor/github.com/Microsoft/go-winio/ea.go new file mode 100644 index 00000000000..4051c1b33bf --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/go-winio/ea.go @@ -0,0 +1,137 @@ +package winio + +import ( + "bytes" + "encoding/binary" + "errors" +) + +type fileFullEaInformation struct { + NextEntryOffset uint32 + Flags uint8 + NameLength uint8 + ValueLength uint16 +} + +var ( + fileFullEaInformationSize = binary.Size(&fileFullEaInformation{}) + + errInvalidEaBuffer = errors.New("invalid extended attribute buffer") + errEaNameTooLarge = errors.New("extended attribute name too large") + errEaValueTooLarge = errors.New("extended attribute value too large") +) + +// ExtendedAttribute represents a single Windows EA. +type ExtendedAttribute struct { + Name string + Value []byte + Flags uint8 +} + +func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { + var info fileFullEaInformation + err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) + if err != nil { + err = errInvalidEaBuffer + return + } + + nameOffset := fileFullEaInformationSize + nameLen := int(info.NameLength) + valueOffset := nameOffset + int(info.NameLength) + 1 + valueLen := int(info.ValueLength) + nextOffset := int(info.NextEntryOffset) + if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { + err = errInvalidEaBuffer + return + } + + ea.Name = string(b[nameOffset : nameOffset+nameLen]) + ea.Value = b[valueOffset : valueOffset+valueLen] + ea.Flags = info.Flags + if info.NextEntryOffset != 0 { + nb = b[info.NextEntryOffset:] + } + return +} + +// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION +// buffer retrieved from BackupRead, ZwQueryEaFile, etc. 
+func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { + for len(b) != 0 { + ea, nb, err := parseEa(b) + if err != nil { + return nil, err + } + + eas = append(eas, ea) + b = nb + } + return +} + +func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { + if int(uint8(len(ea.Name))) != len(ea.Name) { + return errEaNameTooLarge + } + if int(uint16(len(ea.Value))) != len(ea.Value) { + return errEaValueTooLarge + } + entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) + withPadding := (entrySize + 3) &^ 3 + nextOffset := uint32(0) + if !last { + nextOffset = withPadding + } + info := fileFullEaInformation{ + NextEntryOffset: nextOffset, + Flags: ea.Flags, + NameLength: uint8(len(ea.Name)), + ValueLength: uint16(len(ea.Value)), + } + + err := binary.Write(buf, binary.LittleEndian, &info) + if err != nil { + return err + } + + _, err = buf.Write([]byte(ea.Name)) + if err != nil { + return err + } + + err = buf.WriteByte(0) + if err != nil { + return err + } + + _, err = buf.Write(ea.Value) + if err != nil { + return err + } + + _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) + if err != nil { + return err + } + + return nil +} + +// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION +// buffer for use with BackupWrite, ZwSetEaFile, etc. +func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { + var buf bytes.Buffer + for i := range eas { + last := false + if i == len(eas)-1 { + last = true + } + + err := writeEa(&buf, &eas[i], last) + if err != nil { + return nil, err + } + } + return buf.Bytes(), nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/go-winio/file.go b/ecs-agent/vendor/github.com/Microsoft/go-winio/file.go new file mode 100644 index 00000000000..293ab54c80c --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/go-winio/file.go @@ -0,0 +1,329 @@ +//go:build windows +// +build windows + +package winio + +import ( + "errors" + "io" + "runtime" + "sync" + "sync/atomic" + "syscall" + "time" +) + +//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx +//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort +//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus +//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes +//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult + +type atomicBool int32 + +func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } +func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } +func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } +func (b *atomicBool) swap(new bool) bool { + var newInt int32 + if new { + newInt = 1 + } + return atomic.SwapInt32((*int32)(b), newInt) == 1 +} + +const ( + cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 + cFILE_SKIP_SET_EVENT_ON_HANDLE = 2 +) + +var ( + ErrFileClosed = errors.New("file has already been closed") + ErrTimeout = &timeoutError{} +) + +type timeoutError struct{} + +func (e *timeoutError) Error() string { return "i/o timeout" } +func (e *timeoutError) Timeout() bool { return true } +func (e *timeoutError) Temporary() bool { 
return true } + +type timeoutChan chan struct{} + +var ioInitOnce sync.Once +var ioCompletionPort syscall.Handle + +// ioResult contains the result of an asynchronous IO operation +type ioResult struct { + bytes uint32 + err error +} + +// ioOperation represents an outstanding asynchronous Win32 IO +type ioOperation struct { + o syscall.Overlapped + ch chan ioResult +} + +func initIo() { + h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) + if err != nil { + panic(err) + } + ioCompletionPort = h + go ioCompletionProcessor(h) +} + +// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. +// It takes ownership of this handle and will close it if it is garbage collected. +type win32File struct { + handle syscall.Handle + wg sync.WaitGroup + wgLock sync.RWMutex + closing atomicBool + socket bool + readDeadline deadlineHandler + writeDeadline deadlineHandler +} + +type deadlineHandler struct { + setLock sync.Mutex + channel timeoutChan + channelLock sync.RWMutex + timer *time.Timer + timedout atomicBool +} + +// makeWin32File makes a new win32File from an existing file handle +func makeWin32File(h syscall.Handle) (*win32File, error) { + f := &win32File{handle: h} + ioInitOnce.Do(initIo) + _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) + if err != nil { + return nil, err + } + err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE) + if err != nil { + return nil, err + } + f.readDeadline.channel = make(timeoutChan) + f.writeDeadline.channel = make(timeoutChan) + return f, nil +} + +func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { + // If we return the result of makeWin32File directly, it can result in an + // interface-wrapped nil, rather than a nil interface value. + f, err := makeWin32File(h) + if err != nil { + return nil, err + } + return f, nil +} + +// closeHandle closes the resources associated with a Win32 handle +func (f *win32File) closeHandle() { + f.wgLock.Lock() + // Atomically set that we are closing, releasing the resources only once. + if !f.closing.swap(true) { + f.wgLock.Unlock() + // cancel all IO and wait for it to complete + cancelIoEx(f.handle, nil) + f.wg.Wait() + // at this point, no new IO can start + syscall.Close(f.handle) + f.handle = 0 + } else { + f.wgLock.Unlock() + } +} + +// Close closes a win32File. +func (f *win32File) Close() error { + f.closeHandle() + return nil +} + +// IsClosed checks if the file has been closed +func (f *win32File) IsClosed() bool { + return f.closing.isSet() +} + +// prepareIo prepares for a new IO operation. +// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. +func (f *win32File) prepareIo() (*ioOperation, error) { + f.wgLock.RLock() + if f.closing.isSet() { + f.wgLock.RUnlock() + return nil, ErrFileClosed + } + f.wg.Add(1) + f.wgLock.RUnlock() + c := &ioOperation{} + c.ch = make(chan ioResult) + return c, nil +} + +// ioCompletionProcessor processes completed async IOs forever +func ioCompletionProcessor(h syscall.Handle) { + for { + var bytes uint32 + var key uintptr + var op *ioOperation + err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE) + if op == nil { + panic(err) + } + op.ch <- ioResult{bytes, err} + } +} + +// asyncIo processes the return value from ReadFile or WriteFile, blocking until +// the operation has actually completed. 
+func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { + if err != syscall.ERROR_IO_PENDING { + return int(bytes), err + } + + if f.closing.isSet() { + cancelIoEx(f.handle, &c.o) + } + + var timeout timeoutChan + if d != nil { + d.channelLock.Lock() + timeout = d.channel + d.channelLock.Unlock() + } + + var r ioResult + select { + case r = <-c.ch: + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { + if f.closing.isSet() { + err = ErrFileClosed + } + } else if err != nil && f.socket { + // err is from Win32. Query the overlapped structure to get the winsock error. + var bytes, flags uint32 + err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) + } + case <-timeout: + cancelIoEx(f.handle, &c.o) + r = <-c.ch + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { + err = ErrTimeout + } + } + + // runtime.KeepAlive is needed, as c is passed via native + // code to ioCompletionProcessor, c must remain alive + // until the channel read is complete. + runtime.KeepAlive(c) + return int(r.bytes), err +} + +// Read reads from a file handle. +func (f *win32File) Read(b []byte) (int, error) { + c, err := f.prepareIo() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.readDeadline.timedout.isSet() { + return 0, ErrTimeout + } + + var bytes uint32 + err = syscall.ReadFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIo(c, &f.readDeadline, bytes, err) + runtime.KeepAlive(b) + + // Handle EOF conditions. + if err == nil && n == 0 && len(b) != 0 { + return 0, io.EOF + } else if err == syscall.ERROR_BROKEN_PIPE { + return 0, io.EOF + } else { + return n, err + } +} + +// Write writes to a file handle. +func (f *win32File) Write(b []byte) (int, error) { + c, err := f.prepareIo() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.writeDeadline.timedout.isSet() { + return 0, ErrTimeout + } + + var bytes uint32 + err = syscall.WriteFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIo(c, &f.writeDeadline, bytes, err) + runtime.KeepAlive(b) + return n, err +} + +func (f *win32File) SetReadDeadline(deadline time.Time) error { + return f.readDeadline.set(deadline) +} + +func (f *win32File) SetWriteDeadline(deadline time.Time) error { + return f.writeDeadline.set(deadline) +} + +func (f *win32File) Flush() error { + return syscall.FlushFileBuffers(f.handle) +} + +func (f *win32File) Fd() uintptr { + return uintptr(f.handle) +} + +func (d *deadlineHandler) set(deadline time.Time) error { + d.setLock.Lock() + defer d.setLock.Unlock() + + if d.timer != nil { + if !d.timer.Stop() { + <-d.channel + } + d.timer = nil + } + d.timedout.setFalse() + + select { + case <-d.channel: + d.channelLock.Lock() + d.channel = make(chan struct{}) + d.channelLock.Unlock() + default: + } + + if deadline.IsZero() { + return nil + } + + timeoutIO := func() { + d.timedout.setTrue() + close(d.channel) + } + + now := time.Now() + duration := deadline.Sub(now) + if deadline.After(now) { + // Deadline is in the future, set a timer to wait + d.timer = time.AfterFunc(duration, timeoutIO) + } else { + // Deadline is in the past. Cancel all pending IO now. 
+ timeoutIO() + } + return nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/go-winio/fileinfo.go b/ecs-agent/vendor/github.com/Microsoft/go-winio/fileinfo.go new file mode 100644 index 00000000000..3ab6bff69c5 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -0,0 +1,73 @@ +// +build windows + +package winio + +import ( + "os" + "runtime" + "unsafe" + + "golang.org/x/sys/windows" +) + +// FileBasicInfo contains file access time and file attributes information. +type FileBasicInfo struct { + CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime + FileAttributes uint32 + pad uint32 // padding +} + +// GetFileBasicInfo retrieves times and attributes for a file. +func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { + bi := &FileBasicInfo{} + if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return bi, nil +} + +// SetFileBasicInfo sets times and attributes for a file. +func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { + if err := windows.SetFileInformationByHandle(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return nil +} + +// FileStandardInfo contains extended information for the file. +// FILE_STANDARD_INFO in WinBase.h +// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info +type FileStandardInfo struct { + AllocationSize, EndOfFile int64 + NumberOfLinks uint32 + DeletePending, Directory bool +} + +// GetFileStandardInfo retrieves ended information for the file. +func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) { + si := &FileStandardInfo{} + if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileStandardInfo, (*byte)(unsafe.Pointer(si)), uint32(unsafe.Sizeof(*si))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return si, nil +} + +// FileIDInfo contains the volume serial number and file ID for a file. This pair should be +// unique on a system. +type FileIDInfo struct { + VolumeSerialNumber uint64 + FileID [16]byte +} + +// GetFileID retrieves the unique (volume, file ID) pair for a file. 
+func GetFileID(f *os.File) (*FileIDInfo, error) { + fileID := &FileIDInfo{} + if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileIdInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return fileID, nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/go-winio/hvsock.go b/ecs-agent/vendor/github.com/Microsoft/go-winio/hvsock.go new file mode 100644 index 00000000000..b2b644d002a --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -0,0 +1,316 @@ +//go:build windows +// +build windows + +package winio + +import ( + "fmt" + "io" + "net" + "os" + "syscall" + "time" + "unsafe" + + "github.com/Microsoft/go-winio/pkg/guid" +) + +//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind + +const ( + afHvSock = 34 // AF_HYPERV + + socketError = ^uintptr(0) +) + +// An HvsockAddr is an address for a AF_HYPERV socket. +type HvsockAddr struct { + VMID guid.GUID + ServiceID guid.GUID +} + +type rawHvsockAddr struct { + Family uint16 + _ uint16 + VMID guid.GUID + ServiceID guid.GUID +} + +// Network returns the address's network name, "hvsock". +func (addr *HvsockAddr) Network() string { + return "hvsock" +} + +func (addr *HvsockAddr) String() string { + return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID) +} + +// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port. +func VsockServiceID(port uint32) guid.GUID { + g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3") + g.Data1 = port + return g +} + +func (addr *HvsockAddr) raw() rawHvsockAddr { + return rawHvsockAddr{ + Family: afHvSock, + VMID: addr.VMID, + ServiceID: addr.ServiceID, + } +} + +func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) { + addr.VMID = raw.VMID + addr.ServiceID = raw.ServiceID +} + +// HvsockListener is a socket listener for the AF_HYPERV address family. +type HvsockListener struct { + sock *win32File + addr HvsockAddr +} + +// HvsockConn is a connected socket of the AF_HYPERV address family. +type HvsockConn struct { + sock *win32File + local, remote HvsockAddr +} + +func newHvSocket() (*win32File, error) { + fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1) + if err != nil { + return nil, os.NewSyscallError("socket", err) + } + f, err := makeWin32File(fd) + if err != nil { + syscall.Close(fd) + return nil, err + } + f.socket = true + return f, nil +} + +// ListenHvsock listens for connections on the specified hvsock address. +func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { + l := &HvsockListener{addr: *addr} + sock, err := newHvSocket() + if err != nil { + return nil, l.opErr("listen", err) + } + sa := addr.raw() + err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa))) + if err != nil { + return nil, l.opErr("listen", os.NewSyscallError("socket", err)) + } + err = syscall.Listen(sock.handle, 16) + if err != nil { + return nil, l.opErr("listen", os.NewSyscallError("listen", err)) + } + return &HvsockListener{sock: sock, addr: *addr}, nil +} + +func (l *HvsockListener) opErr(op string, err error) error { + return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err} +} + +// Addr returns the listener's network address. +func (l *HvsockListener) Addr() net.Addr { + return &l.addr +} + +// Accept waits for the next connection and returns it. 
+func (l *HvsockListener) Accept() (_ net.Conn, err error) { + sock, err := newHvSocket() + if err != nil { + return nil, l.opErr("accept", err) + } + defer func() { + if sock != nil { + sock.Close() + } + }() + c, err := l.sock.prepareIo() + if err != nil { + return nil, l.opErr("accept", err) + } + defer l.sock.wg.Done() + + // AcceptEx, per documentation, requires an extra 16 bytes per address. + const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{})) + var addrbuf [addrlen * 2]byte + + var bytes uint32 + err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o) + _, err = l.sock.asyncIo(c, nil, bytes, err) + if err != nil { + return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) + } + conn := &HvsockConn{ + sock: sock, + } + conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0]))) + conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) + sock = nil + return conn, nil +} + +// Close closes the listener, causing any pending Accept calls to fail. +func (l *HvsockListener) Close() error { + return l.sock.Close() +} + +/* Need to finish ConnectEx handling +func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) { + sock, err := newHvSocket() + if err != nil { + return nil, err + } + defer func() { + if sock != nil { + sock.Close() + } + }() + c, err := sock.prepareIo() + if err != nil { + return nil, err + } + defer sock.wg.Done() + var bytes uint32 + err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o) + _, err = sock.asyncIo(ctx, c, nil, bytes, err) + if err != nil { + return nil, err + } + conn := &HvsockConn{ + sock: sock, + remote: *addr, + } + sock = nil + return conn, nil +} +*/ + +func (conn *HvsockConn) opErr(op string, err error) error { + return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} +} + +func (conn *HvsockConn) Read(b []byte) (int, error) { + c, err := conn.sock.prepareIo() + if err != nil { + return 0, conn.opErr("read", err) + } + defer conn.sock.wg.Done() + buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + var flags, bytes uint32 + err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) + n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err) + if err != nil { + if _, ok := err.(syscall.Errno); ok { + err = os.NewSyscallError("wsarecv", err) + } + return 0, conn.opErr("read", err) + } else if n == 0 { + err = io.EOF + } + return n, err +} + +func (conn *HvsockConn) Write(b []byte) (int, error) { + t := 0 + for len(b) != 0 { + n, err := conn.write(b) + if err != nil { + return t + n, err + } + t += n + b = b[n:] + } + return t, nil +} + +func (conn *HvsockConn) write(b []byte) (int, error) { + c, err := conn.sock.prepareIo() + if err != nil { + return 0, conn.opErr("write", err) + } + defer conn.sock.wg.Done() + buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + var bytes uint32 + err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) + n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err) + if err != nil { + if _, ok := err.(syscall.Errno); ok { + err = os.NewSyscallError("wsasend", err) + } + return 0, conn.opErr("write", err) + } + return n, err +} + +// Close closes the socket connection, failing any pending read or write calls. 
+func (conn *HvsockConn) Close() error { + return conn.sock.Close() +} + +func (conn *HvsockConn) IsClosed() bool { + return conn.sock.IsClosed() +} + +func (conn *HvsockConn) shutdown(how int) error { + if conn.IsClosed() { + return ErrFileClosed + } + + err := syscall.Shutdown(conn.sock.handle, how) + if err != nil { + return os.NewSyscallError("shutdown", err) + } + return nil +} + +// CloseRead shuts down the read end of the socket, preventing future read operations. +func (conn *HvsockConn) CloseRead() error { + err := conn.shutdown(syscall.SHUT_RD) + if err != nil { + return conn.opErr("close", err) + } + return nil +} + +// CloseWrite shuts down the write end of the socket, preventing future write operations and +// notifying the other endpoint that no more data will be written. +func (conn *HvsockConn) CloseWrite() error { + err := conn.shutdown(syscall.SHUT_WR) + if err != nil { + return conn.opErr("close", err) + } + return nil +} + +// LocalAddr returns the local address of the connection. +func (conn *HvsockConn) LocalAddr() net.Addr { + return &conn.local +} + +// RemoteAddr returns the remote address of the connection. +func (conn *HvsockConn) RemoteAddr() net.Addr { + return &conn.remote +} + +// SetDeadline implements the net.Conn SetDeadline method. +func (conn *HvsockConn) SetDeadline(t time.Time) error { + conn.SetReadDeadline(t) + conn.SetWriteDeadline(t) + return nil +} + +// SetReadDeadline implements the net.Conn SetReadDeadline method. +func (conn *HvsockConn) SetReadDeadline(t time.Time) error { + return conn.sock.SetReadDeadline(t) +} + +// SetWriteDeadline implements the net.Conn SetWriteDeadline method. +func (conn *HvsockConn) SetWriteDeadline(t time.Time) error { + return conn.sock.SetWriteDeadline(t) +} diff --git a/ecs-agent/vendor/github.com/Microsoft/go-winio/pipe.go b/ecs-agent/vendor/github.com/Microsoft/go-winio/pipe.go new file mode 100644 index 00000000000..96700a73de2 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/go-winio/pipe.go @@ -0,0 +1,517 @@ +// +build windows + +package winio + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "os" + "runtime" + "syscall" + "time" + "unsafe" +) + +//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe +//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW +//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW +//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo +//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW +//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc +//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = 
ntdll.NtCreateNamedPipeFile +//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb +//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U +//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl + +type ioStatusBlock struct { + Status, Information uintptr +} + +type objectAttributes struct { + Length uintptr + RootDirectory uintptr + ObjectName *unicodeString + Attributes uintptr + SecurityDescriptor *securityDescriptor + SecurityQoS uintptr +} + +type unicodeString struct { + Length uint16 + MaximumLength uint16 + Buffer uintptr +} + +type securityDescriptor struct { + Revision byte + Sbz1 byte + Control uint16 + Owner uintptr + Group uintptr + Sacl uintptr + Dacl uintptr +} + +type ntstatus int32 + +func (status ntstatus) Err() error { + if status >= 0 { + return nil + } + return rtlNtStatusToDosError(status) +} + +const ( + cERROR_PIPE_BUSY = syscall.Errno(231) + cERROR_NO_DATA = syscall.Errno(232) + cERROR_PIPE_CONNECTED = syscall.Errno(535) + cERROR_SEM_TIMEOUT = syscall.Errno(121) + + cSECURITY_SQOS_PRESENT = 0x100000 + cSECURITY_ANONYMOUS = 0 + + cPIPE_TYPE_MESSAGE = 4 + + cPIPE_READMODE_MESSAGE = 2 + + cFILE_OPEN = 1 + cFILE_CREATE = 2 + + cFILE_PIPE_MESSAGE_TYPE = 1 + cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2 + + cSE_DACL_PRESENT = 4 +) + +var ( + // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. + // This error should match net.errClosing since docker takes a dependency on its text. + ErrPipeListenerClosed = errors.New("use of closed network connection") + + errPipeWriteClosed = errors.New("pipe has been closed for write") +) + +type win32Pipe struct { + *win32File + path string +} + +type win32MessageBytePipe struct { + win32Pipe + writeClosed bool + readEOF bool +} + +type pipeAddress string + +func (f *win32Pipe) LocalAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) RemoteAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) SetDeadline(t time.Time) error { + f.SetReadDeadline(t) + f.SetWriteDeadline(t) + return nil +} + +// CloseWrite closes the write side of a message pipe in byte mode. +func (f *win32MessageBytePipe) CloseWrite() error { + if f.writeClosed { + return errPipeWriteClosed + } + err := f.win32File.Flush() + if err != nil { + return err + } + _, err = f.win32File.Write(nil) + if err != nil { + return err + } + f.writeClosed = true + return nil +} + +// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since +// they are used to implement CloseWrite(). +func (f *win32MessageBytePipe) Write(b []byte) (int, error) { + if f.writeClosed { + return 0, errPipeWriteClosed + } + if len(b) == 0 { + return 0, nil + } + return f.win32File.Write(b) +} + +// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message +// mode pipe will return io.EOF, as will all subsequent reads. +func (f *win32MessageBytePipe) Read(b []byte) (int, error) { + if f.readEOF { + return 0, io.EOF + } + n, err := f.win32File.Read(b) + if err == io.EOF { + // If this was the result of a zero-byte read, then + // it is possible that the read was due to a zero-size + // message. Since we are simulating CloseWrite with a + // zero-byte message, ensure that all future Read() calls + // also return EOF. 
+ f.readEOF = true + } else if err == syscall.ERROR_MORE_DATA { + // ERROR_MORE_DATA indicates that the pipe's read mode is message mode + // and the message still has more bytes. Treat this as a success, since + // this package presents all named pipes as byte streams. + err = nil + } + return n, err +} + +func (s pipeAddress) Network() string { + return "pipe" +} + +func (s pipeAddress) String() string { + return string(s) +} + +// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. +func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) { + for { + + select { + case <-ctx.Done(): + return syscall.Handle(0), ctx.Err() + default: + h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + if err == nil { + return h, nil + } + if err != cERROR_PIPE_BUSY { + return h, &os.PathError{Err: err, Op: "open", Path: *path} + } + // Wait 10 msec and try again. This is a rather simplistic + // view, as we always try each 10 milliseconds. + time.Sleep(10 * time.Millisecond) + } + } +} + +// DialPipe connects to a named pipe by path, timing out if the connection +// takes longer than the specified duration. If timeout is nil, then we use +// a default timeout of 2 seconds. (We do not use WaitNamedPipe.) +func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { + var absTimeout time.Time + if timeout != nil { + absTimeout = time.Now().Add(*timeout) + } else { + absTimeout = time.Now().Add(2 * time.Second) + } + ctx, _ := context.WithDeadline(context.Background(), absTimeout) + conn, err := DialPipeContext(ctx, path) + if err == context.DeadlineExceeded { + return nil, ErrTimeout + } + return conn, err +} + +// DialPipeContext attempts to connect to a named pipe by `path` until `ctx` +// cancellation or timeout. +func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { + return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE) +} + +// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx` +// cancellation or timeout. +func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { + var err error + var h syscall.Handle + h, err = tryDialPipe(ctx, &path, access) + if err != nil { + return nil, err + } + + var flags uint32 + err = getNamedPipeInfo(h, &flags, nil, nil, nil) + if err != nil { + return nil, err + } + + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + + // If the pipe is in message mode, return a message byte pipe, which + // supports CloseWrite(). 
+ if flags&cPIPE_TYPE_MESSAGE != 0 { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: f, path: path}, + }, nil + } + return &win32Pipe{win32File: f, path: path}, nil +} + +type acceptResponse struct { + f *win32File + err error +} + +type win32PipeListener struct { + firstHandle syscall.Handle + path string + config PipeConfig + acceptCh chan (chan acceptResponse) + closeCh chan int + doneCh chan int +} + +func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) { + path16, err := syscall.UTF16FromString(path) + if err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + + var oa objectAttributes + oa.Length = unsafe.Sizeof(oa) + + var ntPath unicodeString + if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + defer localFree(ntPath.Buffer) + oa.ObjectName = &ntPath + + // The security descriptor is only needed for the first pipe. + if first { + if sd != nil { + len := uint32(len(sd)) + sdb := localAlloc(0, len) + defer localFree(sdb) + copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) + oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) + } else { + // Construct the default named pipe security descriptor. + var dacl uintptr + if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { + return 0, fmt.Errorf("getting default named pipe ACL: %s", err) + } + defer localFree(dacl) + + sdb := &securityDescriptor{ + Revision: 1, + Control: cSE_DACL_PRESENT, + Dacl: dacl, + } + oa.SecurityDescriptor = sdb + } + } + + typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS) + if c.MessageMode { + typ |= cFILE_PIPE_MESSAGE_TYPE + } + + disposition := uint32(cFILE_OPEN) + access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE) + if first { + disposition = cFILE_CREATE + // By not asking for read or write access, the named pipe file system + // will put this pipe into an initially disconnected state, blocking + // client connections until the next call with first == false. + access = syscall.SYNCHRONIZE + } + + timeout := int64(-50 * 10000) // 50ms + + var ( + h syscall.Handle + iosb ioStatusBlock + ) + err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err() + if err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + + runtime.KeepAlive(ntPath) + return h, nil +} + +func (l *win32PipeListener) makeServerPipe() (*win32File, error) { + h, err := makeServerPipeHandle(l.path, nil, &l.config, false) + if err != nil { + return nil, err + } + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + return f, nil +} + +func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) { + p, err := l.makeServerPipe() + if err != nil { + return nil, err + } + + // Wait for the client to connect. + ch := make(chan error) + go func(p *win32File) { + ch <- connectPipe(p) + }(p) + + select { + case err = <-ch: + if err != nil { + p.Close() + p = nil + } + case <-l.closeCh: + // Abort the connect request by closing the handle. 
+ p.Close() + p = nil + err = <-ch + if err == nil || err == ErrFileClosed { + err = ErrPipeListenerClosed + } + } + return p, err +} + +func (l *win32PipeListener) listenerRoutine() { + closed := false + for !closed { + select { + case <-l.closeCh: + closed = true + case responseCh := <-l.acceptCh: + var ( + p *win32File + err error + ) + for { + p, err = l.makeConnectedServerPipe() + // If the connection was immediately closed by the client, try + // again. + if err != cERROR_NO_DATA { + break + } + } + responseCh <- acceptResponse{p, err} + closed = err == ErrPipeListenerClosed + } + } + syscall.Close(l.firstHandle) + l.firstHandle = 0 + // Notify Close() and Accept() callers that the handle has been closed. + close(l.doneCh) +} + +// PipeConfig contain configuration for the pipe listener. +type PipeConfig struct { + // SecurityDescriptor contains a Windows security descriptor in SDDL format. + SecurityDescriptor string + + // MessageMode determines whether the pipe is in byte or message mode. In either + // case the pipe is read in byte mode by default. The only practical difference in + // this implementation is that CloseWrite() is only supported for message mode pipes; + // CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only + // transferred to the reader (and returned as io.EOF in this implementation) + // when the pipe is in message mode. + MessageMode bool + + // InputBufferSize specifies the size of the input buffer, in bytes. + InputBufferSize int32 + + // OutputBufferSize specifies the size of the output buffer, in bytes. + OutputBufferSize int32 +} + +// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe. +// The pipe must not already exist. +func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { + var ( + sd []byte + err error + ) + if c == nil { + c = &PipeConfig{} + } + if c.SecurityDescriptor != "" { + sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor) + if err != nil { + return nil, err + } + } + h, err := makeServerPipeHandle(path, sd, c, true) + if err != nil { + return nil, err + } + l := &win32PipeListener{ + firstHandle: h, + path: path, + config: *c, + acceptCh: make(chan (chan acceptResponse)), + closeCh: make(chan int), + doneCh: make(chan int), + } + go l.listenerRoutine() + return l, nil +} + +func connectPipe(p *win32File) error { + c, err := p.prepareIo() + if err != nil { + return err + } + defer p.wg.Done() + + err = connectNamedPipe(p.handle, &c.o) + _, err = p.asyncIo(c, nil, 0, err) + if err != nil && err != cERROR_PIPE_CONNECTED { + return err + } + return nil +} + +func (l *win32PipeListener) Accept() (net.Conn, error) { + ch := make(chan acceptResponse) + select { + case l.acceptCh <- ch: + response := <-ch + err := response.err + if err != nil { + return nil, err + } + if l.config.MessageMode { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: response.f, path: l.path}, + }, nil + } + return &win32Pipe{win32File: response.f, path: l.path}, nil + case <-l.doneCh: + return nil, ErrPipeListenerClosed + } +} + +func (l *win32PipeListener) Close() error { + select { + case l.closeCh <- 1: + <-l.doneCh + case <-l.doneCh: + } + return nil +} + +func (l *win32PipeListener) Addr() net.Addr { + return pipeAddress(l.path) +} diff --git a/ecs-agent/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/ecs-agent/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go new file mode 100644 index 00000000000..2d9161e2dee --- /dev/null +++ 
b/ecs-agent/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go @@ -0,0 +1,228 @@ +// +build windows + +// Package guid provides a GUID type. The backing structure for a GUID is +// identical to that used by the golang.org/x/sys/windows GUID type. +// There are two main binary encodings used for a GUID, the big-endian encoding, +// and the Windows (mixed-endian) encoding. See here for details: +// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding +package guid + +import ( + "crypto/rand" + "crypto/sha1" + "encoding" + "encoding/binary" + "fmt" + "strconv" +) + +// Variant specifies which GUID variant (or "type") of the GUID. It determines +// how the entirety of the rest of the GUID is interpreted. +type Variant uint8 + +// The variants specified by RFC 4122. +const ( + // VariantUnknown specifies a GUID variant which does not conform to one of + // the variant encodings specified in RFC 4122. + VariantUnknown Variant = iota + VariantNCS + VariantRFC4122 + VariantMicrosoft + VariantFuture +) + +// Version specifies how the bits in the GUID were generated. For instance, a +// version 4 GUID is randomly generated, and a version 5 is generated from the +// hash of an input string. +type Version uint8 + +var _ = (encoding.TextMarshaler)(GUID{}) +var _ = (encoding.TextUnmarshaler)(&GUID{}) + +// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. +func NewV4() (GUID, error) { + var b [16]byte + if _, err := rand.Read(b[:]); err != nil { + return GUID{}, err + } + + g := FromArray(b) + g.setVersion(4) // Version 4 means randomly generated. + g.setVariant(VariantRFC4122) + + return g, nil +} + +// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing) +// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name, +// and the sample code treats it as a series of bytes, so we do the same here. +// +// Some implementations, such as those found on Windows, treat the name as a +// big-endian UTF16 stream of bytes. If that is desired, the string can be +// encoded as such before being passed to this function. +func NewV5(namespace GUID, name []byte) (GUID, error) { + b := sha1.New() + namespaceBytes := namespace.ToArray() + b.Write(namespaceBytes[:]) + b.Write(name) + + a := [16]byte{} + copy(a[:], b.Sum(nil)) + + g := FromArray(a) + g.setVersion(5) // Version 5 means generated from a string. + g.setVariant(VariantRFC4122) + + return g, nil +} + +func fromArray(b [16]byte, order binary.ByteOrder) GUID { + var g GUID + g.Data1 = order.Uint32(b[0:4]) + g.Data2 = order.Uint16(b[4:6]) + g.Data3 = order.Uint16(b[6:8]) + copy(g.Data4[:], b[8:16]) + return g +} + +func (g GUID) toArray(order binary.ByteOrder) [16]byte { + b := [16]byte{} + order.PutUint32(b[0:4], g.Data1) + order.PutUint16(b[4:6], g.Data2) + order.PutUint16(b[6:8], g.Data3) + copy(b[8:16], g.Data4[:]) + return b +} + +// FromArray constructs a GUID from a big-endian encoding array of 16 bytes. +func FromArray(b [16]byte) GUID { + return fromArray(b, binary.BigEndian) +} + +// ToArray returns an array of 16 bytes representing the GUID in big-endian +// encoding. +func (g GUID) ToArray() [16]byte { + return g.toArray(binary.BigEndian) +} + +// FromWindowsArray constructs a GUID from a Windows encoding array of bytes. +func FromWindowsArray(b [16]byte) GUID { + return fromArray(b, binary.LittleEndian) +} + +// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows +// encoding. 
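+// For illustration (a rough sketch; the GUID value below is an arbitrary
+// placeholder), the two encodings differ only in the byte order of Data1, Data2
+// and Data3, so converting through either pair of helpers round-trips:
+//
+//	g, _ := FromString("01234567-89ab-cdef-0123-456789abcdef")
+//	be := g.ToArray()        // big-endian:   01 23 45 67 89 ab cd ef ...
+//	le := g.ToWindowsArray() // mixed-endian: 67 45 23 01 ab 89 ef cd ...
+//	_ = FromArray(be) == FromWindowsArray(le) // true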
+func (g GUID) ToWindowsArray() [16]byte { + return g.toArray(binary.LittleEndian) +} + +func (g GUID) String() string { + return fmt.Sprintf( + "%08x-%04x-%04x-%04x-%012x", + g.Data1, + g.Data2, + g.Data3, + g.Data4[:2], + g.Data4[2:]) +} + +// FromString parses a string containing a GUID and returns the GUID. The only +// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` +// format. +func FromString(s string) (GUID, error) { + if len(s) != 36 { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + + var g GUID + + data1, err := strconv.ParseUint(s[0:8], 16, 32) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data1 = uint32(data1) + + data2, err := strconv.ParseUint(s[9:13], 16, 16) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data2 = uint16(data2) + + data3, err := strconv.ParseUint(s[14:18], 16, 16) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data3 = uint16(data3) + + for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} { + v, err := strconv.ParseUint(s[x:x+2], 16, 8) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data4[i] = uint8(v) + } + + return g, nil +} + +func (g *GUID) setVariant(v Variant) { + d := g.Data4[0] + switch v { + case VariantNCS: + d = (d & 0x7f) + case VariantRFC4122: + d = (d & 0x3f) | 0x80 + case VariantMicrosoft: + d = (d & 0x1f) | 0xc0 + case VariantFuture: + d = (d & 0x0f) | 0xe0 + case VariantUnknown: + fallthrough + default: + panic(fmt.Sprintf("invalid variant: %d", v)) + } + g.Data4[0] = d +} + +// Variant returns the GUID variant, as defined in RFC 4122. +func (g GUID) Variant() Variant { + b := g.Data4[0] + if b&0x80 == 0 { + return VariantNCS + } else if b&0xc0 == 0x80 { + return VariantRFC4122 + } else if b&0xe0 == 0xc0 { + return VariantMicrosoft + } else if b&0xe0 == 0xe0 { + return VariantFuture + } + return VariantUnknown +} + +func (g *GUID) setVersion(v Version) { + g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12) +} + +// Version returns the GUID version, as defined in RFC 4122. +func (g GUID) Version() Version { + return Version((g.Data3 & 0xF000) >> 12) +} + +// MarshalText returns the textual representation of the GUID. +func (g GUID) MarshalText() ([]byte, error) { + return []byte(g.String()), nil +} + +// UnmarshalText takes the textual representation of a GUID, and unmarhals it +// into this GUID. +func (g *GUID) UnmarshalText(text []byte) error { + g2, err := FromString(string(text)) + if err != nil { + return err + } + *g = g2 + return nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go b/ecs-agent/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go new file mode 100644 index 00000000000..f64d828c0ba --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go @@ -0,0 +1,15 @@ +// +build !windows + +package guid + +// GUID represents a GUID/UUID. It has the same structure as +// golang.org/x/sys/windows.GUID so that it can be used with functions expecting +// that type. It is defined as its own type as that is only available to builds +// targeted at `windows`. The representation matches that used by native Windows +// code. 
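+// For example, the GUID whose textual form is "01234567-89ab-cdef-0123-456789abcdef"
+// (an arbitrary placeholder value) is represented as:
+//
+//	GUID{
+//		Data1: 0x01234567,
+//		Data2: 0x89ab,
+//		Data3: 0xcdef,
+//		Data4: [8]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef},
+//	}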
+type GUID struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} diff --git a/ecs-agent/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go b/ecs-agent/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go new file mode 100644 index 00000000000..83617f4eee9 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go @@ -0,0 +1,10 @@ +package guid + +import "golang.org/x/sys/windows" + +// GUID represents a GUID/UUID. It has the same structure as +// golang.org/x/sys/windows.GUID so that it can be used with functions expecting +// that type. It is defined as its own type so that stringification and +// marshaling can be supported. The representation matches that used by native +// Windows code. +type GUID windows.GUID diff --git a/ecs-agent/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go b/ecs-agent/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go new file mode 100644 index 00000000000..602920786c9 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go @@ -0,0 +1,160 @@ +// +build windows + +package security + +import ( + "fmt" + "os" + "syscall" + "unsafe" +) + +type ( + accessMask uint32 + accessMode uint32 + desiredAccess uint32 + inheritMode uint32 + objectType uint32 + shareMode uint32 + securityInformation uint32 + trusteeForm uint32 + trusteeType uint32 + + explicitAccess struct { + accessPermissions accessMask + accessMode accessMode + inheritance inheritMode + trustee trustee + } + + trustee struct { + multipleTrustee *trustee + multipleTrusteeOperation int32 + trusteeForm trusteeForm + trusteeType trusteeType + name uintptr + } +) + +const ( + accessMaskDesiredPermission accessMask = 1 << 31 // GENERIC_READ + + accessModeGrant accessMode = 1 + + desiredAccessReadControl desiredAccess = 0x20000 + desiredAccessWriteDac desiredAccess = 0x40000 + + gvmga = "GrantVmGroupAccess:" + + inheritModeNoInheritance inheritMode = 0x0 + inheritModeSubContainersAndObjectsInherit inheritMode = 0x3 + + objectTypeFileObject objectType = 0x1 + + securityInformationDACL securityInformation = 0x4 + + shareModeRead shareMode = 0x1 + shareModeWrite shareMode = 0x2 + + sidVmGroup = "S-1-5-83-0" + + trusteeFormIsSid trusteeForm = 0 + + trusteeTypeWellKnownGroup trusteeType = 5 +) + +// GrantVMGroupAccess sets the DACL for a specified file or directory to +// include Grant ACE entries for the VM Group SID. This is a golang re- +// implementation of the same function in vmcompute, just not exported in +// RS5. Which kind of sucks. Sucks a lot :/ +func GrantVmGroupAccess(name string) error { + // Stat (to determine if `name` is a directory). + s, err := os.Stat(name) + if err != nil { + return fmt.Errorf("%s os.Stat %s: %w", gvmga, name, err) + } + + // Get a handle to the file/directory. Must defer Close on success. + fd, err := createFile(name, s.IsDir()) + if err != nil { + return err // Already wrapped + } + defer syscall.CloseHandle(fd) + + // Get the current DACL and Security Descriptor. Must defer LocalFree on success. + ot := objectTypeFileObject + si := securityInformationDACL + sd := uintptr(0) + origDACL := uintptr(0) + if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil { + return fmt.Errorf("%s GetSecurityInfo %s: %w", gvmga, name, err) + } + defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd))) + + // Generate a new DACL which is the current DACL with the required ACEs added. 
+ // Must defer LocalFree on success. + newDACL, err := generateDACLWithAcesAdded(name, s.IsDir(), origDACL) + if err != nil { + return err // Already wrapped + } + defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(newDACL))) + + // And finally use SetSecurityInfo to apply the updated DACL. + if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil { + return fmt.Errorf("%s SetSecurityInfo %s: %w", gvmga, name, err) + } + + return nil +} + +// createFile is a helper function to call [Nt]CreateFile to get a handle to +// the file or directory. +func createFile(name string, isDir bool) (syscall.Handle, error) { + namep := syscall.StringToUTF16(name) + da := uint32(desiredAccessReadControl | desiredAccessWriteDac) + sm := uint32(shareModeRead | shareModeWrite) + fa := uint32(syscall.FILE_ATTRIBUTE_NORMAL) + if isDir { + fa = uint32(fa | syscall.FILE_FLAG_BACKUP_SEMANTICS) + } + fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0) + if err != nil { + return 0, fmt.Errorf("%s syscall.CreateFile %s: %w", gvmga, name, err) + } + return fd, nil +} + +// generateDACLWithAcesAdded generates a new DACL with the two needed ACEs added. +// The caller is responsible for LocalFree of the returned DACL on success. +func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintptr, error) { + // Generate pointers to the SIDs based on the string SIDs + sid, err := syscall.StringToSid(sidVmGroup) + if err != nil { + return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVmGroup, err) + } + + inheritance := inheritModeNoInheritance + if isDir { + inheritance = inheritModeSubContainersAndObjectsInherit + } + + eaArray := []explicitAccess{ + explicitAccess{ + accessPermissions: accessMaskDesiredPermission, + accessMode: accessModeGrant, + inheritance: inheritance, + trustee: trustee{ + trusteeForm: trusteeFormIsSid, + trusteeType: trusteeTypeWellKnownGroup, + name: uintptr(unsafe.Pointer(sid)), + }, + }, + } + + modifiedDACL := uintptr(0) + if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil { + return 0, fmt.Errorf("%s SetEntriesInAcl %s: %w", gvmga, name, err) + } + + return modifiedDACL, nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go b/ecs-agent/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go new file mode 100644 index 00000000000..d7096716ce2 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go @@ -0,0 +1,7 @@ +package security + +//go:generate go run mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go + +//sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) = advapi32.GetSecurityInfo +//sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) = advapi32.SetSecurityInfo +//sys setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) = advapi32.SetEntriesInAclW diff --git a/ecs-agent/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go b/ecs-agent/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go new file mode 100644 index 00000000000..4084680e0f0 --- /dev/null 
+++ b/ecs-agent/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go @@ -0,0 +1,70 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package security + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + + procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo") + procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW") + procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo") +) + +func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) { + r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) { + r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) { + r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} diff --git a/ecs-agent/vendor/github.com/Microsoft/go-winio/privilege.go b/ecs-agent/vendor/github.com/Microsoft/go-winio/privilege.go new file mode 100644 index 00000000000..c3dd7c21769 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/go-winio/privilege.go @@ -0,0 +1,203 @@ +// +build windows + +package winio + +import ( + "bytes" + "encoding/binary" + "fmt" + "runtime" + "sync" + "syscall" + "unicode/utf16" + + "golang.org/x/sys/windows" +) + +//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges +//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf +//sys revertToSelf() (err error) = advapi32.RevertToSelf +//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken +//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread +//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = 
advapi32.LookupPrivilegeValueW +//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW +//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW + +const ( + SE_PRIVILEGE_ENABLED = 2 + + ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 + + SeBackupPrivilege = "SeBackupPrivilege" + SeRestorePrivilege = "SeRestorePrivilege" + SeSecurityPrivilege = "SeSecurityPrivilege" +) + +const ( + securityAnonymous = iota + securityIdentification + securityImpersonation + securityDelegation +) + +var ( + privNames = make(map[string]uint64) + privNameMutex sync.Mutex +) + +// PrivilegeError represents an error enabling privileges. +type PrivilegeError struct { + privileges []uint64 +} + +func (e *PrivilegeError) Error() string { + s := "" + if len(e.privileges) > 1 { + s = "Could not enable privileges " + } else { + s = "Could not enable privilege " + } + for i, p := range e.privileges { + if i != 0 { + s += ", " + } + s += `"` + s += getPrivilegeName(p) + s += `"` + } + return s +} + +// RunWithPrivilege enables a single privilege for a function call. +func RunWithPrivilege(name string, fn func() error) error { + return RunWithPrivileges([]string{name}, fn) +} + +// RunWithPrivileges enables privileges for a function call. +func RunWithPrivileges(names []string, fn func() error) error { + privileges, err := mapPrivileges(names) + if err != nil { + return err + } + runtime.LockOSThread() + defer runtime.UnlockOSThread() + token, err := newThreadToken() + if err != nil { + return err + } + defer releaseThreadToken(token) + err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED) + if err != nil { + return err + } + return fn() +} + +func mapPrivileges(names []string) ([]uint64, error) { + var privileges []uint64 + privNameMutex.Lock() + defer privNameMutex.Unlock() + for _, name := range names { + p, ok := privNames[name] + if !ok { + err := lookupPrivilegeValue("", name, &p) + if err != nil { + return nil, err + } + privNames[name] = p + } + privileges = append(privileges, p) + } + return privileges, nil +} + +// EnableProcessPrivileges enables privileges globally for the process. +func EnableProcessPrivileges(names []string) error { + return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED) +} + +// DisableProcessPrivileges disables privileges globally for the process. 
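+// For illustration, a caller can scope a privilege to a single operation, for
+// example (sketch only; the body is a placeholder for the privileged work):
+//
+//	err := RunWithPrivilege(SeBackupPrivilege, func() error {
+//		// open files with FILE_FLAG_BACKUP_SEMANTICS, read backup streams, etc.
+//		return nil
+//	})
+//
+// or enable the privileges for the lifetime of the process with
+// EnableProcessPrivileges and drop them again with the function below.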
+func DisableProcessPrivileges(names []string) error {
+	return enableDisableProcessPrivilege(names, 0)
+}
+
+func enableDisableProcessPrivilege(names []string, action uint32) error {
+	privileges, err := mapPrivileges(names)
+	if err != nil {
+		return err
+	}
+
+	p, _ := windows.GetCurrentProcess()
+	var token windows.Token
+	err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
+	if err != nil {
+		return err
+	}
+
+	defer token.Close()
+	return adjustPrivileges(token, privileges, action)
+}
+
+func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
+	var b bytes.Buffer
+	binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
+	for _, p := range privileges {
+		binary.Write(&b, binary.LittleEndian, p)
+		binary.Write(&b, binary.LittleEndian, action)
+	}
+	prevState := make([]byte, b.Len())
+	reqSize := uint32(0)
+	success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
+	if !success {
+		return err
+	}
+	if err == ERROR_NOT_ALL_ASSIGNED {
+		return &PrivilegeError{privileges}
+	}
+	return nil
+}
+
+func getPrivilegeName(luid uint64) string {
+	var nameBuffer [256]uint16
+	bufSize := uint32(len(nameBuffer))
+	err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
+	if err != nil {
+		return fmt.Sprintf("<unknown privilege %d>", luid)
+	}
+
+	var displayNameBuffer [256]uint16
+	displayBufSize := uint32(len(displayNameBuffer))
+	var langID uint32
+	err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID)
+	if err != nil {
+		return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize])))
+	}
+
+	return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
+}
+
+func newThreadToken() (windows.Token, error) {
+	err := impersonateSelf(securityImpersonation)
+	if err != nil {
+		return 0, err
+	}
+
+	var token windows.Token
+	err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token)
+	if err != nil {
+		rerr := revertToSelf()
+		if rerr != nil {
+			panic(rerr)
+		}
+		return 0, err
+	}
+	return token, nil
+}
+
+func releaseThreadToken(h windows.Token) {
+	err := revertToSelf()
+	if err != nil {
+		panic(err)
+	}
+	h.Close()
+}
diff --git a/ecs-agent/vendor/github.com/Microsoft/go-winio/reparse.go b/ecs-agent/vendor/github.com/Microsoft/go-winio/reparse.go
new file mode 100644
index 00000000000..fc1ee4d3a3e
--- /dev/null
+++ b/ecs-agent/vendor/github.com/Microsoft/go-winio/reparse.go
@@ -0,0 +1,128 @@
+package winio
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"strings"
+	"unicode/utf16"
+	"unsafe"
+)
+
+const (
+	reparseTagMountPoint = 0xA0000003
+	reparseTagSymlink    = 0xA000000C
+)
+
+type reparseDataBuffer struct {
+	ReparseTag           uint32
+	ReparseDataLength    uint16
+	Reserved             uint16
+	SubstituteNameOffset uint16
+	SubstituteNameLength uint16
+	PrintNameOffset      uint16
+	PrintNameLength      uint16
+}
+
+// ReparsePoint describes a Win32 symlink or mount point.
+type ReparsePoint struct {
+	Target       string
+	IsMountPoint bool
+}
+
+// UnsupportedReparsePointError is returned when trying to decode a non-symlink or
+// mount point reparse point.
+type UnsupportedReparsePointError struct {
+	Tag uint32
+}
+
+func (e *UnsupportedReparsePointError) Error() string {
+	return fmt.Sprintf("unsupported reparse point %x", e.Tag)
+}
+
+// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink
+// or a mount point.
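+// A rough usage sketch (the target path is a placeholder):
+//
+//	buf := EncodeReparsePoint(&ReparsePoint{Target: `C:\some\target`, IsMountPoint: false})
+//	rp, err := DecodeReparsePoint(buf)
+//	// on success, rp.Target == `C:\some\target` and rp.IsMountPoint == false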
+func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { + tag := binary.LittleEndian.Uint32(b[0:4]) + return DecodeReparsePointData(tag, b[8:]) +} + +func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) { + isMountPoint := false + switch tag { + case reparseTagMountPoint: + isMountPoint = true + case reparseTagSymlink: + default: + return nil, &UnsupportedReparsePointError{tag} + } + nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6]) + if !isMountPoint { + nameOffset += 4 + } + nameLength := binary.LittleEndian.Uint16(b[6:8]) + name := make([]uint16, nameLength/2) + err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name) + if err != nil { + return nil, err + } + return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil +} + +func isDriveLetter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or +// mount point. +func EncodeReparsePoint(rp *ReparsePoint) []byte { + // Generate an NT path and determine if this is a relative path. + var ntTarget string + relative := false + if strings.HasPrefix(rp.Target, `\\?\`) { + ntTarget = `\??\` + rp.Target[4:] + } else if strings.HasPrefix(rp.Target, `\\`) { + ntTarget = `\??\UNC\` + rp.Target[2:] + } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' { + ntTarget = `\??\` + rp.Target + } else { + ntTarget = rp.Target + relative = true + } + + // The paths must be NUL-terminated even though they are counted strings. + target16 := utf16.Encode([]rune(rp.Target + "\x00")) + ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00")) + + size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8 + size += len(ntTarget16)*2 + len(target16)*2 + + tag := uint32(reparseTagMountPoint) + if !rp.IsMountPoint { + tag = reparseTagSymlink + size += 4 // Add room for symlink flags + } + + data := reparseDataBuffer{ + ReparseTag: tag, + ReparseDataLength: uint16(size), + SubstituteNameOffset: 0, + SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2), + PrintNameOffset: uint16(len(ntTarget16) * 2), + PrintNameLength: uint16((len(target16) - 1) * 2), + } + + var b bytes.Buffer + binary.Write(&b, binary.LittleEndian, &data) + if !rp.IsMountPoint { + flags := uint32(0) + if relative { + flags |= 1 + } + binary.Write(&b, binary.LittleEndian, flags) + } + + binary.Write(&b, binary.LittleEndian, ntTarget16) + binary.Write(&b, binary.LittleEndian, target16) + return b.Bytes() +} diff --git a/ecs-agent/vendor/github.com/Microsoft/go-winio/sd.go b/ecs-agent/vendor/github.com/Microsoft/go-winio/sd.go new file mode 100644 index 00000000000..db1b370a1b5 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/go-winio/sd.go @@ -0,0 +1,98 @@ +// +build windows + +package winio + +import ( + "syscall" + "unsafe" +) + +//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW +//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW +//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW +//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = 
advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW +//sys localFree(mem uintptr) = LocalFree +//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength + +const ( + cERROR_NONE_MAPPED = syscall.Errno(1332) +) + +type AccountLookupError struct { + Name string + Err error +} + +func (e *AccountLookupError) Error() string { + if e.Name == "" { + return "lookup account: empty account name specified" + } + var s string + switch e.Err { + case cERROR_NONE_MAPPED: + s = "not found" + default: + s = e.Err.Error() + } + return "lookup account " + e.Name + ": " + s +} + +type SddlConversionError struct { + Sddl string + Err error +} + +func (e *SddlConversionError) Error() string { + return "convert " + e.Sddl + ": " + e.Err.Error() +} + +// LookupSidByName looks up the SID of an account by name +func LookupSidByName(name string) (sid string, err error) { + if name == "" { + return "", &AccountLookupError{name, cERROR_NONE_MAPPED} + } + + var sidSize, sidNameUse, refDomainSize uint32 + err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) + if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { + return "", &AccountLookupError{name, err} + } + sidBuffer := make([]byte, sidSize) + refDomainBuffer := make([]uint16, refDomainSize) + err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) + if err != nil { + return "", &AccountLookupError{name, err} + } + var strBuffer *uint16 + err = convertSidToStringSid(&sidBuffer[0], &strBuffer) + if err != nil { + return "", &AccountLookupError{name, err} + } + sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) + localFree(uintptr(unsafe.Pointer(strBuffer))) + return sid, nil +} + +func SddlToSecurityDescriptor(sddl string) ([]byte, error) { + var sdBuffer uintptr + err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil) + if err != nil { + return nil, &SddlConversionError{sddl, err} + } + defer localFree(sdBuffer) + sd := make([]byte, getSecurityDescriptorLength(sdBuffer)) + copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)]) + return sd, nil +} + +func SecurityDescriptorToSddl(sd []byte) (string, error) { + var sddl *uint16 + // The returned string length seems to including an aribtrary number of terminating NULs. + // Don't use it. 
+ err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil) + if err != nil { + return "", err + } + defer localFree(uintptr(unsafe.Pointer(sddl))) + return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/go-winio/syscall.go b/ecs-agent/vendor/github.com/Microsoft/go-winio/syscall.go new file mode 100644 index 00000000000..5955c99fdea --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/go-winio/syscall.go @@ -0,0 +1,3 @@ +package winio + +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go diff --git a/ecs-agent/vendor/github.com/Microsoft/go-winio/vhd/vhd.go b/ecs-agent/vendor/github.com/Microsoft/go-winio/vhd/vhd.go new file mode 100644 index 00000000000..f7f78fc2304 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/go-winio/vhd/vhd.go @@ -0,0 +1,350 @@ +//go:build windows +// +build windows + +package vhd + +import ( + "fmt" + "syscall" + + "github.com/Microsoft/go-winio/pkg/guid" + "golang.org/x/sys/windows" +) + +//go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go + +//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) = virtdisk.CreateVirtualDisk +//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk +//sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) = virtdisk.AttachVirtualDisk +//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) = virtdisk.DetachVirtualDisk +//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) = virtdisk.GetVirtualDiskPhysicalPath + +type ( + CreateVirtualDiskFlag uint32 + VirtualDiskFlag uint32 + AttachVirtualDiskFlag uint32 + DetachVirtualDiskFlag uint32 + VirtualDiskAccessMask uint32 +) + +type VirtualStorageType struct { + DeviceID uint32 + VendorID guid.GUID +} + +type CreateVersion2 struct { + UniqueID guid.GUID + MaximumSize uint64 + BlockSizeInBytes uint32 + SectorSizeInBytes uint32 + PhysicalSectorSizeInByte uint32 + ParentPath *uint16 // string + SourcePath *uint16 // string + OpenFlags uint32 + ParentVirtualStorageType VirtualStorageType + SourceVirtualStorageType VirtualStorageType + ResiliencyGUID guid.GUID +} + +type CreateVirtualDiskParameters struct { + Version uint32 // Must always be set to 2 + Version2 CreateVersion2 +} + +type OpenVersion2 struct { + GetInfoOnly bool + ReadOnly bool + ResiliencyGUID guid.GUID +} + +type OpenVirtualDiskParameters struct { + Version uint32 // Must always be set to 2 + Version2 OpenVersion2 +} + +// The higher level `OpenVersion2` struct uses bools to refer to `GetInfoOnly` and `ReadOnly` for ease of use. However, +// the internal windows structure uses `BOOLS` aka int32s for these types. 
`openVersion2` is used for translating +// `OpenVersion2` fields to the correct windows internal field types on the `Open____` methods. +type openVersion2 struct { + getInfoOnly int32 + readOnly int32 + resiliencyGUID guid.GUID +} + +type openVirtualDiskParameters struct { + version uint32 + version2 openVersion2 +} + +type AttachVersion2 struct { + RestrictedOffset uint64 + RestrictedLength uint64 +} + +type AttachVirtualDiskParameters struct { + Version uint32 + Version2 AttachVersion2 +} + +const ( + VIRTUAL_STORAGE_TYPE_DEVICE_VHDX = 0x3 + + // Access Mask for opening a VHD + VirtualDiskAccessNone VirtualDiskAccessMask = 0x00000000 + VirtualDiskAccessAttachRO VirtualDiskAccessMask = 0x00010000 + VirtualDiskAccessAttachRW VirtualDiskAccessMask = 0x00020000 + VirtualDiskAccessDetach VirtualDiskAccessMask = 0x00040000 + VirtualDiskAccessGetInfo VirtualDiskAccessMask = 0x00080000 + VirtualDiskAccessCreate VirtualDiskAccessMask = 0x00100000 + VirtualDiskAccessMetaOps VirtualDiskAccessMask = 0x00200000 + VirtualDiskAccessRead VirtualDiskAccessMask = 0x000d0000 + VirtualDiskAccessAll VirtualDiskAccessMask = 0x003f0000 + VirtualDiskAccessWritable VirtualDiskAccessMask = 0x00320000 + + // Flags for creating a VHD + CreateVirtualDiskFlagNone CreateVirtualDiskFlag = 0x0 + CreateVirtualDiskFlagFullPhysicalAllocation CreateVirtualDiskFlag = 0x1 + CreateVirtualDiskFlagPreventWritesToSourceDisk CreateVirtualDiskFlag = 0x2 + CreateVirtualDiskFlagDoNotCopyMetadataFromParent CreateVirtualDiskFlag = 0x4 + CreateVirtualDiskFlagCreateBackingStorage CreateVirtualDiskFlag = 0x8 + CreateVirtualDiskFlagUseChangeTrackingSourceLimit CreateVirtualDiskFlag = 0x10 + CreateVirtualDiskFlagPreserveParentChangeTrackingState CreateVirtualDiskFlag = 0x20 + CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40 + CreateVirtualDiskFlagSparseFile CreateVirtualDiskFlag = 0x80 + CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100 + CreateVirtualDiskFlagSupportCompressedVolumes CreateVirtualDiskFlag = 0x200 + + // Flags for opening a VHD + OpenVirtualDiskFlagNone VirtualDiskFlag = 0x00000000 + OpenVirtualDiskFlagNoParents VirtualDiskFlag = 0x00000001 + OpenVirtualDiskFlagBlankFile VirtualDiskFlag = 0x00000002 + OpenVirtualDiskFlagBootDrive VirtualDiskFlag = 0x00000004 + OpenVirtualDiskFlagCachedIO VirtualDiskFlag = 0x00000008 + OpenVirtualDiskFlagCustomDiffChain VirtualDiskFlag = 0x00000010 + OpenVirtualDiskFlagParentCachedIO VirtualDiskFlag = 0x00000020 + OpenVirtualDiskFlagVhdsetFileOnly VirtualDiskFlag = 0x00000040 + OpenVirtualDiskFlagIgnoreRelativeParentLocator VirtualDiskFlag = 0x00000080 + OpenVirtualDiskFlagNoWriteHardening VirtualDiskFlag = 0x00000100 + OpenVirtualDiskFlagSupportCompressedVolumes VirtualDiskFlag = 0x00000200 + + // Flags for attaching a VHD + AttachVirtualDiskFlagNone AttachVirtualDiskFlag = 0x00000000 + AttachVirtualDiskFlagReadOnly AttachVirtualDiskFlag = 0x00000001 + AttachVirtualDiskFlagNoDriveLetter AttachVirtualDiskFlag = 0x00000002 + AttachVirtualDiskFlagPermanentLifetime AttachVirtualDiskFlag = 0x00000004 + AttachVirtualDiskFlagNoLocalHost AttachVirtualDiskFlag = 0x00000008 + AttachVirtualDiskFlagNoSecurityDescriptor AttachVirtualDiskFlag = 0x00000010 + AttachVirtualDiskFlagBypassDefaultEncryptionPolicy AttachVirtualDiskFlag = 0x00000020 + AttachVirtualDiskFlagNonPnp AttachVirtualDiskFlag = 0x00000040 + AttachVirtualDiskFlagRestrictedRange AttachVirtualDiskFlag = 0x00000080 + AttachVirtualDiskFlagSinglePartition AttachVirtualDiskFlag = 0x00000100 + 
AttachVirtualDiskFlagRegisterVolume AttachVirtualDiskFlag = 0x00000200 + + // Flags for detaching a VHD + DetachVirtualDiskFlagNone DetachVirtualDiskFlag = 0x0 +) + +// CreateVhdx is a helper function to create a simple vhdx file at the given path using +// default values. +func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error { + params := CreateVirtualDiskParameters{ + Version: 2, + Version2: CreateVersion2{ + MaximumSize: uint64(maxSizeInGb) * 1024 * 1024 * 1024, + BlockSizeInBytes: blockSizeInMb * 1024 * 1024, + }, + } + + handle, err := CreateVirtualDisk(path, VirtualDiskAccessNone, CreateVirtualDiskFlagNone, ¶ms) + if err != nil { + return err + } + + return syscall.CloseHandle(handle) +} + +// DetachVirtualDisk detaches a virtual hard disk by handle. +func DetachVirtualDisk(handle syscall.Handle) (err error) { + if err := detachVirtualDisk(handle, 0, 0); err != nil { + return fmt.Errorf("failed to detach virtual disk: %w", err) + } + return nil +} + +// DetachVhd detaches a vhd found at `path`. +func DetachVhd(path string) error { + handle, err := OpenVirtualDisk( + path, + VirtualDiskAccessNone, + OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator, + ) + if err != nil { + return err + } + defer syscall.CloseHandle(handle) + return DetachVirtualDisk(handle) +} + +// AttachVirtualDisk attaches a virtual hard disk for use. +func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtualDiskFlag, parameters *AttachVirtualDiskParameters) (err error) { + // Supports both version 1 and 2 of the attach parameters as version 2 wasn't present in RS5. + if err := attachVirtualDisk( + handle, + nil, + uint32(attachVirtualDiskFlag), + 0, + parameters, + nil, + ); err != nil { + return fmt.Errorf("failed to attach virtual disk: %w", err) + } + return nil +} + +// AttachVhd attaches a virtual hard disk at `path` for use. Attaches using version 2 +// of the ATTACH_VIRTUAL_DISK_PARAMETERS. +func AttachVhd(path string) (err error) { + handle, err := OpenVirtualDisk( + path, + VirtualDiskAccessNone, + OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator, + ) + if err != nil { + return err + } + + defer syscall.CloseHandle(handle) + params := AttachVirtualDiskParameters{Version: 2} + if err := AttachVirtualDisk( + handle, + AttachVirtualDiskFlagNone, + ¶ms, + ); err != nil { + return fmt.Errorf("failed to attach virtual disk: %w", err) + } + return nil +} + +// OpenVirtualDisk obtains a handle to a VHD opened with supplied access mask and flags. +func OpenVirtualDisk(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag) (syscall.Handle, error) { + parameters := OpenVirtualDiskParameters{Version: 2} + handle, err := OpenVirtualDiskWithParameters( + vhdPath, + virtualDiskAccessMask, + openVirtualDiskFlags, + ¶meters, + ) + if err != nil { + return 0, err + } + return handle, nil +} + +// OpenVirtualDiskWithParameters obtains a handle to a VHD opened with supplied access mask, flags and parameters. 
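+// A rough usage sketch (the path and sizes are placeholders): create a small VHDX
+// with CreateVhdx, then reopen it read-only through version 2 parameters:
+//
+//	if err := CreateVhdx(`C:\disks\scratch.vhdx`, 1, 1); err != nil {
+//		// handle the error
+//	}
+//	params := OpenVirtualDiskParameters{Version: 2, Version2: OpenVersion2{ReadOnly: true}}
+//	h, err := OpenVirtualDiskWithParameters(`C:\disks\scratch.vhdx`, VirtualDiskAccessNone, OpenVirtualDiskFlagNone, &params)
+//	// ... use h, then:
+//	_ = syscall.CloseHandle(h)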
+func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag, parameters *OpenVirtualDiskParameters) (syscall.Handle, error) { + var ( + handle syscall.Handle + defaultType VirtualStorageType + getInfoOnly int32 + readOnly int32 + ) + if parameters.Version != 2 { + return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version) + } + if parameters.Version2.GetInfoOnly { + getInfoOnly = 1 + } + if parameters.Version2.ReadOnly { + readOnly = 1 + } + params := &openVirtualDiskParameters{ + version: parameters.Version, + version2: openVersion2{ + getInfoOnly, + readOnly, + parameters.Version2.ResiliencyGUID, + }, + } + if err := openVirtualDisk( + &defaultType, + vhdPath, + uint32(virtualDiskAccessMask), + uint32(openVirtualDiskFlags), + params, + &handle, + ); err != nil { + return 0, fmt.Errorf("failed to open virtual disk: %w", err) + } + return handle, nil +} + +// CreateVirtualDisk creates a virtual harddisk and returns a handle to the disk. +func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask, createVirtualDiskFlags CreateVirtualDiskFlag, parameters *CreateVirtualDiskParameters) (syscall.Handle, error) { + var ( + handle syscall.Handle + defaultType VirtualStorageType + ) + if parameters.Version != 2 { + return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version) + } + + if err := createVirtualDisk( + &defaultType, + path, + uint32(virtualDiskAccessMask), + nil, + uint32(createVirtualDiskFlags), + 0, + parameters, + nil, + &handle, + ); err != nil { + return handle, fmt.Errorf("failed to create virtual disk: %w", err) + } + return handle, nil +} + +// GetVirtualDiskPhysicalPath takes a handle to a virtual hard disk and returns the physical +// path of the disk on the machine. This path is in the form \\.\PhysicalDriveX where X is an integer +// that represents the particular enumeration of the physical disk on the caller's system. +func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) { + var ( + diskPathSizeInBytes uint32 = 256 * 2 // max path length 256 wide chars + diskPhysicalPathBuf [256]uint16 + ) + if err := getVirtualDiskPhysicalPath( + handle, + &diskPathSizeInBytes, + &diskPhysicalPathBuf[0], + ); err != nil { + return "", fmt.Errorf("failed to get disk physical path: %w", err) + } + return windows.UTF16ToString(diskPhysicalPathBuf[:]), nil +} + +// CreateDiffVhd is a helper function to create a differencing virtual disk. +func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error { + // Setting `ParentPath` is how to signal to create a differencing disk. 
+ createParams := &CreateVirtualDiskParameters{ + Version: 2, + Version2: CreateVersion2{ + ParentPath: windows.StringToUTF16Ptr(baseVhdPath), + BlockSizeInBytes: blockSizeInMB * 1024 * 1024, + OpenFlags: uint32(OpenVirtualDiskFlagCachedIO), + }, + } + + vhdHandle, err := CreateVirtualDisk( + diffVhdPath, + VirtualDiskAccessNone, + CreateVirtualDiskFlagNone, + createParams, + ) + if err != nil { + return fmt.Errorf("failed to create differencing vhd: %w", err) + } + if err := syscall.CloseHandle(vhdHandle); err != nil { + return fmt.Errorf("failed to close differencing vhd handle: %w", err) + } + return nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go b/ecs-agent/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go new file mode 100644 index 00000000000..1d7498db3be --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go @@ -0,0 +1,106 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package vhd + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modvirtdisk = windows.NewLazySystemDLL("virtdisk.dll") + + procAttachVirtualDisk = modvirtdisk.NewProc("AttachVirtualDisk") + procCreateVirtualDisk = modvirtdisk.NewProc("CreateVirtualDisk") + procDetachVirtualDisk = modvirtdisk.NewProc("DetachVirtualDisk") + procGetVirtualDiskPhysicalPath = modvirtdisk.NewProc("GetVirtualDiskPhysicalPath") + procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk") +) + +func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) { + r0, _, _ := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped))) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) { + var _p0 *uint16 + _p0, win32err = syscall.UTF16PtrFromString(path) + if win32err != nil { + return + } + return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, createVirtualDiskFlags, providerSpecificFlags, parameters, overlapped, handle) +} + +func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, 
handle *syscall.Handle) (win32err error) { + r0, _, _ := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle))) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) { + r0, _, _ := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags)) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) { + r0, _, _ := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer))) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) { + var _p0 *uint16 + _p0, win32err = syscall.UTF16PtrFromString(path) + if win32err != nil { + return + } + return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, openVirtualDiskFlags, parameters, handle) +} + +func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) { + r0, _, _ := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} diff --git a/ecs-agent/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/ecs-agent/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go new file mode 100644 index 00000000000..176ff75e320 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -0,0 +1,427 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package winio + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modntdll = windows.NewLazySystemDLL("ntdll.dll") + modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") + + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") + procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procBackupRead = modkernel32.NewProc("BackupRead") + procBackupWrite = modkernel32.NewProc("BackupWrite") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") + procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procLocalFree = modkernel32.NewProc("LocalFree") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") + procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") + procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") + procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") + procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") + procbind = modws2_32.NewProc("bind") +) + +func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { + var _p0 uint32 + if releaseAll { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) + success = r0 != 0 + if true { + err = errnoErr(e1) + } + return +} + +func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func 
convertSidToStringSid(sid *byte, str **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return + } + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) +} + +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getSecurityDescriptorLength(sd uintptr) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) + len = uint32(r0) + return +} + +func impersonateSelf(level uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(accountName) + if err != nil { + return + } + return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) +} + +func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) +} + +func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeName(_p0, luid, buffer, size) +} + +func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), 
uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + var _p1 *uint16 + _p1, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _lookupPrivilegeValue(_p0, _p1, luid) +} + +func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func revertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } + r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } + r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) +} + +func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), 
uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = errnoErr(e1) + } + return +} + +func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) + newport = syscall.Handle(r0) + if newport == 0 { + err = errnoErr(e1) + } + return +} + +func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) +} + +func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = errnoErr(e1) + } + return +} + +func getCurrentThread() (h syscall.Handle) { + r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) + h = syscall.Handle(r0) + return +} + +func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { + r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) + ptr = uintptr(r0) + return +} + +func localFree(mem uintptr) { + syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) + return +} + +func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 
0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) { + r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) + status = ntstatus(r0) + return +} + +func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) { + r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) + status = ntstatus(r0) + return +} + +func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) { + r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) + status = ntstatus(r0) + return +} + +func rtlNtStatusToDosError(status ntstatus) (winerr error) { + r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) + if r0 != 0 { + winerr = syscall.Errno(r0) + } + return +} + +func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socketError { + err = errnoErr(e1) + } + return +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/LICENSE b/ecs-agent/vendor/github.com/Microsoft/hcsshim/LICENSE new file mode 100644 index 00000000000..49d21669aee --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go new file mode 100644 index 00000000000..7f1f2823ddc --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go @@ -0,0 +1,38 @@ +package computestorage + +import ( + "context" + "encoding/json" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// AttachLayerStorageFilter sets up the layer storage filter on a writable +// container layer. +// +// `layerPath` is a path to a directory the writable layer is mounted. If the +// path does not end in a `\` the platform will append it automatically. +// +// `layerData` is the parent read-only layer data. +func AttachLayerStorageFilter(ctx context.Context, layerPath string, layerData LayerData) (err error) { + title := "hcsshim.AttachLayerStorageFilter" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("layerPath", layerPath), + ) + + bytes, err := json.Marshal(layerData) + if err != nil { + return err + } + + err = hcsAttachLayerStorageFilter(layerPath, string(bytes)) + if err != nil { + return errors.Wrap(err, "failed to attach layer storage filter") + } + return nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go new file mode 100644 index 00000000000..8e28e6c5046 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go @@ -0,0 +1,26 @@ +package computestorage + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// DestroyLayer deletes a container layer. +// +// `layerPath` is a path to a directory containing the layer to export. +func DestroyLayer(ctx context.Context, layerPath string) (err error) { + title := "hcsshim.DestroyLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("layerPath", layerPath)) + + err = hcsDestroyLayer(layerPath) + if err != nil { + return errors.Wrap(err, "failed to destroy layer") + } + return nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go new file mode 100644 index 00000000000..435473257e3 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go @@ -0,0 +1,26 @@ +package computestorage + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// DetachLayerStorageFilter detaches the layer storage filter on a writable container layer. +// +// `layerPath` is a path to a directory containing the layer to export. 
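As a rough usage sketch of the storage-filter wrappers being vendored here (illustrative only, not part of the upstream Go files): a Windows-only caller might attach, detach and finally destroy a writable layer roughly as follows. The layer path is hypothetical, and a real caller would populate LayerData with the parent read-only layers rather than passing an empty value.

package main

import (
	"context"
	"log"

	"github.com/Microsoft/hcsshim/computestorage"
)

func main() {
	ctx := context.Background()
	layerPath := `C:\layers\writable-layer` // hypothetical path

	// Attach the storage filter to the writable layer. An empty LayerData is
	// used only to keep the sketch short; real callers describe the parent
	// read-only layers here.
	if err := computestorage.AttachLayerStorageFilter(ctx, layerPath, computestorage.LayerData{}); err != nil {
		log.Fatalf("attach layer storage filter: %v", err)
	}

	// Detach the filter and remove the layer once it is no longer needed.
	if err := computestorage.DetachLayerStorageFilter(ctx, layerPath); err != nil {
		log.Fatalf("detach layer storage filter: %v", err)
	}
	if err := computestorage.DestroyLayer(ctx, layerPath); err != nil {
		log.Fatalf("destroy layer: %v", err)
	}
}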
+func DetachLayerStorageFilter(ctx context.Context, layerPath string) (err error) { + title := "hcsshim.DetachLayerStorageFilter" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("layerPath", layerPath)) + + err = hcsDetachLayerStorageFilter(layerPath) + if err != nil { + return errors.Wrap(err, "failed to detach layer storage filter") + } + return nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/export.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/export.go new file mode 100644 index 00000000000..a1b12dd1292 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/export.go @@ -0,0 +1,46 @@ +package computestorage + +import ( + "context" + "encoding/json" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// ExportLayer exports a container layer. +// +// `layerPath` is a path to a directory containing the layer to export. +// +// `exportFolderPath` is a pre-existing folder to export the layer to. +// +// `layerData` is the parent layer data. +// +// `options` are the export options applied to the exported layer. +func ExportLayer(ctx context.Context, layerPath, exportFolderPath string, layerData LayerData, options ExportLayerOptions) (err error) { + title := "hcsshim.ExportLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("layerPath", layerPath), + trace.StringAttribute("exportFolderPath", exportFolderPath), + ) + + ldbytes, err := json.Marshal(layerData) + if err != nil { + return err + } + + obytes, err := json.Marshal(options) + if err != nil { + return err + } + + err = hcsExportLayer(layerPath, exportFolderPath, string(ldbytes), string(obytes)) + if err != nil { + return errors.Wrap(err, "failed to export layer") + } + return nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/format.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/format.go new file mode 100644 index 00000000000..83c0fa33f0f --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/format.go @@ -0,0 +1,26 @@ +package computestorage + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" + "golang.org/x/sys/windows" +) + +// FormatWritableLayerVhd formats a virtual disk for use as a writable container layer. +// +// If the VHD is not mounted it will be temporarily mounted. 
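The ExportLayer wrapper above passes JSON-encoded layer data and options through to HCS. As a minimal, non-authoritative sketch (not part of the vendored file), exporting a writable layer into a pre-existing folder could look like this; the paths are hypothetical and the parent-layer description is left empty.

package main

import (
	"context"
	"log"

	"github.com/Microsoft/hcsshim/computestorage"
)

func main() {
	ctx := context.Background()

	// Export a writable (scratch) layer. The export folder must already
	// exist; LayerData would normally list the parent read-only layers.
	err := computestorage.ExportLayer(
		ctx,
		`C:\layers\container-scratch`, // layer to export (hypothetical)
		`C:\export\scratch`,           // pre-existing destination folder
		computestorage.LayerData{},
		computestorage.ExportLayerOptions{IsWritableLayer: true},
	)
	if err != nil {
		log.Fatalf("export layer: %v", err)
	}
}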
+func FormatWritableLayerVhd(ctx context.Context, vhdHandle windows.Handle) (err error) { + title := "hcsshim.FormatWritableLayerVhd" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + + err = hcsFormatWritableLayerVhd(vhdHandle) + if err != nil { + return errors.Wrap(err, "failed to format writable layer vhd") + } + return nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go new file mode 100644 index 00000000000..87fee452cd3 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go @@ -0,0 +1,193 @@ +package computestorage + +import ( + "context" + "os" + "path/filepath" + "syscall" + + "github.com/Microsoft/go-winio/pkg/security" + "github.com/Microsoft/go-winio/vhd" + "github.com/pkg/errors" + "golang.org/x/sys/windows" +) + +const defaultVHDXBlockSizeInMB = 1 + +// SetupContainerBaseLayer is a helper to setup a containers scratch. It +// will create and format the vhdx's inside and the size is configurable with the sizeInGB +// parameter. +// +// `layerPath` is the path to the base container layer on disk. +// +// `baseVhdPath` is the path to where the base vhdx for the base layer should be created. +// +// `diffVhdPath` is the path where the differencing disk for the base layer should be created. +// +// `sizeInGB` is the size in gigabytes to make the base vhdx. +func SetupContainerBaseLayer(ctx context.Context, layerPath, baseVhdPath, diffVhdPath string, sizeInGB uint64) (err error) { + var ( + hivesPath = filepath.Join(layerPath, "Hives") + layoutPath = filepath.Join(layerPath, "Layout") + ) + + // We need to remove the hives directory and layout file as `SetupBaseOSLayer` fails if these files + // already exist. `SetupBaseOSLayer` will create these files internally. We also remove the base and + // differencing disks if they exist in case we're asking for a different size. 
+ if _, err := os.Stat(hivesPath); err == nil { + if err := os.RemoveAll(hivesPath); err != nil { + return errors.Wrap(err, "failed to remove prexisting hives directory") + } + } + if _, err := os.Stat(layoutPath); err == nil { + if err := os.RemoveAll(layoutPath); err != nil { + return errors.Wrap(err, "failed to remove prexisting layout file") + } + } + + if _, err := os.Stat(baseVhdPath); err == nil { + if err := os.RemoveAll(baseVhdPath); err != nil { + return errors.Wrap(err, "failed to remove base vhdx path") + } + } + if _, err := os.Stat(diffVhdPath); err == nil { + if err := os.RemoveAll(diffVhdPath); err != nil { + return errors.Wrap(err, "failed to remove differencing vhdx") + } + } + + createParams := &vhd.CreateVirtualDiskParameters{ + Version: 2, + Version2: vhd.CreateVersion2{ + MaximumSize: sizeInGB * 1024 * 1024 * 1024, + BlockSizeInBytes: defaultVHDXBlockSizeInMB * 1024 * 1024, + }, + } + handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams) + if err != nil { + return errors.Wrap(err, "failed to create vhdx") + } + + defer func() { + if err != nil { + _ = syscall.CloseHandle(handle) + os.RemoveAll(baseVhdPath) + os.RemoveAll(diffVhdPath) + } + }() + + if err = FormatWritableLayerVhd(ctx, windows.Handle(handle)); err != nil { + return err + } + // Base vhd handle must be closed before calling SetupBaseLayer in case of Container layer + if err = syscall.CloseHandle(handle); err != nil { + return errors.Wrap(err, "failed to close vhdx handle") + } + + options := OsLayerOptions{ + Type: OsLayerTypeContainer, + } + + // SetupBaseOSLayer expects an empty vhd handle for a container layer and will + // error out otherwise. + if err = SetupBaseOSLayer(ctx, layerPath, 0, options); err != nil { + return err + } + // Create the differencing disk that will be what's copied for the final rw layer + // for a container. + if err = vhd.CreateDiffVhd(diffVhdPath, baseVhdPath, defaultVHDXBlockSizeInMB); err != nil { + return errors.Wrap(err, "failed to create differencing disk") + } + + if err = security.GrantVmGroupAccess(baseVhdPath); err != nil { + return errors.Wrapf(err, "failed to grant vm group access to %s", baseVhdPath) + } + if err = security.GrantVmGroupAccess(diffVhdPath); err != nil { + return errors.Wrapf(err, "failed to grant vm group access to %s", diffVhdPath) + } + return nil +} + +// SetupUtilityVMBaseLayer is a helper to setup a UVMs scratch space. It will create and format +// the vhdx inside and the size is configurable by the sizeInGB parameter. +// +// `uvmPath` is the path to the UtilityVM filesystem. +// +// `baseVhdPath` is the path to where the base vhdx for the UVM should be created. +// +// `diffVhdPath` is the path where the differencing disk for the UVM should be created. +// +// `sizeInGB` specifies the size in gigabytes to make the base vhdx. +func SetupUtilityVMBaseLayer(ctx context.Context, uvmPath, baseVhdPath, diffVhdPath string, sizeInGB uint64) (err error) { + // Remove the base and differencing disks if they exist in case we're asking for a different size. + if _, err := os.Stat(baseVhdPath); err == nil { + if err := os.RemoveAll(baseVhdPath); err != nil { + return errors.Wrap(err, "failed to remove base vhdx") + } + } + if _, err := os.Stat(diffVhdPath); err == nil { + if err := os.RemoveAll(diffVhdPath); err != nil { + return errors.Wrap(err, "failed to remove differencing vhdx") + } + } + + // Just create the vhdx for utilityVM layer, no need to format it. 
+ createParams := &vhd.CreateVirtualDiskParameters{ + Version: 2, + Version2: vhd.CreateVersion2{ + MaximumSize: sizeInGB * 1024 * 1024 * 1024, + BlockSizeInBytes: defaultVHDXBlockSizeInMB * 1024 * 1024, + }, + } + handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams) + if err != nil { + return errors.Wrap(err, "failed to create vhdx") + } + + defer func() { + if err != nil { + _ = syscall.CloseHandle(handle) + os.RemoveAll(baseVhdPath) + os.RemoveAll(diffVhdPath) + } + }() + + // If it is a UtilityVM layer then the base vhdx must be attached when calling + // `SetupBaseOSLayer` + attachParams := &vhd.AttachVirtualDiskParameters{ + Version: 2, + } + if err := vhd.AttachVirtualDisk(handle, vhd.AttachVirtualDiskFlagNone, attachParams); err != nil { + return errors.Wrapf(err, "failed to attach virtual disk") + } + + options := OsLayerOptions{ + Type: OsLayerTypeVM, + } + if err := SetupBaseOSLayer(ctx, uvmPath, windows.Handle(handle), options); err != nil { + return err + } + + // Detach and close the handle after setting up the layer as we don't need the handle + // for anything else and we no longer need to be attached either. + if err = vhd.DetachVirtualDisk(handle); err != nil { + return errors.Wrap(err, "failed to detach vhdx") + } + if err = syscall.CloseHandle(handle); err != nil { + return errors.Wrap(err, "failed to close vhdx handle") + } + + // Create the differencing disk that will be what's copied for the final rw layer + // for a container. + if err = vhd.CreateDiffVhd(diffVhdPath, baseVhdPath, defaultVHDXBlockSizeInMB); err != nil { + return errors.Wrap(err, "failed to create differencing disk") + } + + if err := security.GrantVmGroupAccess(baseVhdPath); err != nil { + return errors.Wrapf(err, "failed to grant vm group access to %s", baseVhdPath) + } + if err := security.GrantVmGroupAccess(diffVhdPath); err != nil { + return errors.Wrapf(err, "failed to grant vm group access to %s", diffVhdPath) + } + return nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/import.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/import.go new file mode 100644 index 00000000000..0c61dab3291 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/import.go @@ -0,0 +1,41 @@ +package computestorage + +import ( + "context" + "encoding/json" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// ImportLayer imports a container layer. +// +// `layerPath` is a path to a directory to import the layer to. If the directory +// does not exist it will be automatically created. +// +// `sourceFolderpath` is a pre-existing folder that contains the layer to +// import. +// +// `layerData` is the parent layer data. 
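The SetupContainerBaseLayer and SetupUtilityVMBaseLayer helpers above wrap the whole create / format / differencing-disk / grant-access sequence. A minimal sketch of how a consumer might call the container variant (illustrative only, not part of the vendored file; the layer path, VHDX file names and 20 GB size are arbitrary choices):

package main

import (
	"context"
	"log"
	"path/filepath"

	"github.com/Microsoft/hcsshim/computestorage"
)

func main() {
	ctx := context.Background()
	layerPath := `C:\layers\base` // expanded base image layer (hypothetical)

	// The helper creates and formats the base VHDX, derives the differencing
	// disk from it, and grants the VM group access to both files.
	baseVhd := filepath.Join(layerPath, "base.vhdx")
	diffVhd := filepath.Join(layerPath, "diff.vhdx")

	if err := computestorage.SetupContainerBaseLayer(ctx, layerPath, baseVhd, diffVhd, 20); err != nil {
		log.Fatalf("setup container base layer: %v", err)
	}
}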
+func ImportLayer(ctx context.Context, layerPath, sourceFolderPath string, layerData LayerData) (err error) { + title := "hcsshim.ImportLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("layerPath", layerPath), + trace.StringAttribute("sourceFolderPath", sourceFolderPath), + ) + + bytes, err := json.Marshal(layerData) + if err != nil { + return err + } + + err = hcsImportLayer(layerPath, sourceFolderPath, string(bytes)) + if err != nil { + return errors.Wrap(err, "failed to import layer") + } + return nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go new file mode 100644 index 00000000000..53ed8ea6eda --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go @@ -0,0 +1,38 @@ +package computestorage + +import ( + "context" + "encoding/json" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// InitializeWritableLayer initializes a writable layer for a container. +// +// `layerPath` is a path to a directory the layer is mounted. If the +// path does not end in a `\` the platform will append it automatically. +// +// `layerData` is the parent read-only layer data. +func InitializeWritableLayer(ctx context.Context, layerPath string, layerData LayerData) (err error) { + title := "hcsshim.InitializeWritableLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("layerPath", layerPath), + ) + + bytes, err := json.Marshal(layerData) + if err != nil { + return err + } + + // Options are not used in the platform as of RS5 + err = hcsInitializeWritableLayer(layerPath, string(bytes), "") + if err != nil { + return errors.Wrap(err, "failed to intitialize container layer") + } + return nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go new file mode 100644 index 00000000000..fcdbbef8143 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go @@ -0,0 +1,27 @@ +package computestorage + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" + "golang.org/x/sys/windows" +) + +// GetLayerVhdMountPath returns the volume path for a virtual disk of a writable container layer. 
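ImportLayer is the inverse of ExportLayer above. A brief, hedged sketch of calling it (again not part of the vendored file, with hypothetical paths and empty parent-layer data):

package main

import (
	"context"
	"log"

	"github.com/Microsoft/hcsshim/computestorage"
)

func main() {
	ctx := context.Background()

	// Import a previously exported layer. The destination directory is
	// created automatically if it does not exist.
	err := computestorage.ImportLayer(
		ctx,
		`C:\layers\imported`, // destination layer path (hypothetical)
		`C:\export\scratch`,  // pre-existing folder holding the exported layer
		computestorage.LayerData{},
	)
	if err != nil {
		log.Fatalf("import layer: %v", err)
	}
}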
+func GetLayerVhdMountPath(ctx context.Context, vhdHandle windows.Handle) (path string, err error) { + title := "hcsshim.GetLayerVhdMountPath" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + + var mountPath *uint16 + err = hcsGetLayerVhdMountPath(vhdHandle, &mountPath) + if err != nil { + return "", errors.Wrap(err, "failed to get vhd mount path") + } + path = interop.ConvertAndFreeCoTaskMemString(mountPath) + return path, nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go new file mode 100644 index 00000000000..06aaf841e8f --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go @@ -0,0 +1,74 @@ +package computestorage + +import ( + "context" + "encoding/json" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/osversion" + "github.com/pkg/errors" + "go.opencensus.io/trace" + "golang.org/x/sys/windows" +) + +// SetupBaseOSLayer sets up a layer that contains a base OS for a container. +// +// `layerPath` is a path to a directory containing the layer. +// +// `vhdHandle` is an empty file handle of `options.Type == OsLayerTypeContainer` +// or else it is a file handle to the 'SystemTemplateBase.vhdx' if `options.Type +// == OsLayerTypeVm`. +// +// `options` are the options applied while processing the layer. +func SetupBaseOSLayer(ctx context.Context, layerPath string, vhdHandle windows.Handle, options OsLayerOptions) (err error) { + title := "hcsshim.SetupBaseOSLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("layerPath", layerPath), + ) + + bytes, err := json.Marshal(options) + if err != nil { + return err + } + + err = hcsSetupBaseOSLayer(layerPath, vhdHandle, string(bytes)) + if err != nil { + return errors.Wrap(err, "failed to setup base OS layer") + } + return nil +} + +// SetupBaseOSVolume sets up a volume that contains a base OS for a container. +// +// `layerPath` is a path to a directory containing the layer. +// +// `volumePath` is the path to the volume to be used for setup. +// +// `options` are the options applied while processing the layer. 
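GetLayerVhdMountPath ties the vendored vhd package together with these computestorage wrappers: given an open VHD handle, it returns the volume path backing the layer. A rough end-to-end sketch under the assumption that the caller creates, formats and attaches its own scratch VHDX (the path and sizes are hypothetical; this is not part of the vendored files):

package main

import (
	"context"
	"fmt"
	"log"
	"syscall"

	"github.com/Microsoft/go-winio/vhd"
	"github.com/Microsoft/hcsshim/computestorage"
	"golang.org/x/sys/windows"
)

func main() {
	ctx := context.Background()
	vhdPath := `C:\scratch\sandbox.vhdx` // hypothetical

	// Create a 10 GB VHDX with a 1 MB block size, mirroring the parameters
	// used by the helpers above.
	params := &vhd.CreateVirtualDiskParameters{
		Version: 2,
		Version2: vhd.CreateVersion2{
			MaximumSize:      10 * 1024 * 1024 * 1024,
			BlockSizeInBytes: 1 * 1024 * 1024,
		},
	}
	handle, err := vhd.CreateVirtualDisk(vhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, params)
	if err != nil {
		log.Fatalf("create vhdx: %v", err)
	}

	// Format it as a writable layer (mounted temporarily if needed), attach
	// it, then ask HCS for the volume path backing the layer.
	if err := computestorage.FormatWritableLayerVhd(ctx, windows.Handle(handle)); err != nil {
		log.Fatalf("format writable layer: %v", err)
	}
	if err := vhd.AttachVirtualDisk(handle, vhd.AttachVirtualDiskFlagNone, &vhd.AttachVirtualDiskParameters{Version: 2}); err != nil {
		log.Fatalf("attach vhdx: %v", err)
	}
	mountPath, err := computestorage.GetLayerVhdMountPath(ctx, windows.Handle(handle))
	if err != nil {
		log.Fatalf("get mount path: %v", err)
	}
	fmt.Println("layer volume path:", mountPath)

	// Detach and close the handle once the path is no longer needed,
	// mirroring the flow in SetupUtilityVMBaseLayer above.
	if err := vhd.DetachVirtualDisk(handle); err != nil {
		log.Fatalf("detach vhdx: %v", err)
	}
	if err := syscall.CloseHandle(handle); err != nil {
		log.Fatalf("close handle: %v", err)
	}
}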
+func SetupBaseOSVolume(ctx context.Context, layerPath, volumePath string, options OsLayerOptions) (err error) { + if osversion.Build() < 19645 { + return errors.New("SetupBaseOSVolume is not present on builds older than 19645") + } + title := "hcsshim.SetupBaseOSVolume" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("layerPath", layerPath), + trace.StringAttribute("volumePath", volumePath), + ) + + bytes, err := json.Marshal(options) + if err != nil { + return err + } + + err = hcsSetupBaseOSVolume(layerPath, volumePath, string(bytes)) + if err != nil { + return errors.Wrap(err, "failed to setup base OS layer") + } + return nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go new file mode 100644 index 00000000000..95aff9c1848 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go @@ -0,0 +1,50 @@ +// Package computestorage is a wrapper around the HCS storage APIs. These are new storage APIs introduced +// separate from the original graphdriver calls intended to give more freedom around creating +// and managing container layers and scratch spaces. +package computestorage + +import ( + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" +) + +//go:generate go run ../mksyscall_windows.go -output zsyscall_windows.go storage.go + +//sys hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) = computestorage.HcsImportLayer? +//sys hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) = computestorage.HcsExportLayer? +//sys hcsDestroyLayer(layerPath string) (hr error) = computestorage.HcsDestoryLayer? +//sys hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) = computestorage.HcsSetupBaseOSLayer? +//sys hcsInitializeWritableLayer(writableLayerPath string, layerData string, options string) (hr error) = computestorage.HcsInitializeWritableLayer? +//sys hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) = computestorage.HcsAttachLayerStorageFilter? +//sys hcsDetachLayerStorageFilter(layerPath string) (hr error) = computestorage.HcsDetachLayerStorageFilter? +//sys hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) = computestorage.HcsFormatWritableLayerVhd? +//sys hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) = computestorage.HcsGetLayerVhdMountPath? +//sys hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (hr error) = computestorage.HcsSetupBaseOSVolume? + +// LayerData is the data used to describe parent layer information. +type LayerData struct { + SchemaVersion hcsschema.Version `json:"SchemaVersion,omitempty"` + Layers []hcsschema.Layer `json:"Layers,omitempty"` +} + +// ExportLayerOptions are the set of options that are used with the `computestorage.HcsExportLayer` syscall. +type ExportLayerOptions struct { + IsWritableLayer bool `json:"IsWritableLayer,omitempty"` +} + +// OsLayerType is the type of layer being operated on. +type OsLayerType string + +const ( + // OsLayerTypeContainer is a container layer. + OsLayerTypeContainer OsLayerType = "Container" + // OsLayerTypeVM is a virtual machine layer. 
+ OsLayerTypeVM OsLayerType = "Vm" +) + +// OsLayerOptions are the set of options that are used with the `SetupBaseOSLayer` and +// `SetupBaseOSVolume` calls. +type OsLayerOptions struct { + Type OsLayerType `json:"Type,omitempty"` + DisableCiCacheOptimization bool `json:"DisableCiCacheOptimization,omitempty"` + SkipUpdateBcdForBoot bool `json:"SkipUpdateBcdForBoot,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go new file mode 100644 index 00000000000..4f951806743 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go @@ -0,0 +1,319 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package computestorage + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modcomputestorage = windows.NewLazySystemDLL("computestorage.dll") + + procHcsImportLayer = modcomputestorage.NewProc("HcsImportLayer") + procHcsExportLayer = modcomputestorage.NewProc("HcsExportLayer") + procHcsDestoryLayer = modcomputestorage.NewProc("HcsDestoryLayer") + procHcsSetupBaseOSLayer = modcomputestorage.NewProc("HcsSetupBaseOSLayer") + procHcsInitializeWritableLayer = modcomputestorage.NewProc("HcsInitializeWritableLayer") + procHcsAttachLayerStorageFilter = modcomputestorage.NewProc("HcsAttachLayerStorageFilter") + procHcsDetachLayerStorageFilter = modcomputestorage.NewProc("HcsDetachLayerStorageFilter") + procHcsFormatWritableLayerVhd = modcomputestorage.NewProc("HcsFormatWritableLayerVhd") + procHcsGetLayerVhdMountPath = modcomputestorage.NewProc("HcsGetLayerVhdMountPath") + procHcsSetupBaseOSVolume = modcomputestorage.NewProc("HcsSetupBaseOSVolume") +) + +func hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(sourceFolderPath) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + return _hcsImportLayer(_p0, _p1, _p2) +} + +func _hcsImportLayer(layerPath *uint16, sourceFolderPath *uint16, layerData *uint16) (hr error) { + if hr = procHcsImportLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsImportLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(sourceFolderPath)), uintptr(unsafe.Pointer(layerData))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(exportFolderPath) + if hr 
!= nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + var _p3 *uint16 + _p3, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsExportLayer(_p0, _p1, _p2, _p3) +} + +func _hcsExportLayer(layerPath *uint16, exportFolderPath *uint16, layerData *uint16, options *uint16) (hr error) { + if hr = procHcsExportLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsExportLayer.Addr(), 4, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(exportFolderPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsDestroyLayer(layerPath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + return _hcsDestroyLayer(_p0) +} + +func _hcsDestroyLayer(layerPath *uint16) (hr error) { + if hr = procHcsDestoryLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsDestoryLayer.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsSetupBaseOSLayer(_p0, handle, _p1) +} + +func _hcsSetupBaseOSLayer(layerPath *uint16, handle windows.Handle, options *uint16) (hr error) { + if hr = procHcsSetupBaseOSLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsSetupBaseOSLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(handle), uintptr(unsafe.Pointer(options))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsInitializeWritableLayer(writableLayerPath string, layerData string, options string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(writableLayerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsInitializeWritableLayer(_p0, _p1, _p2) +} + +func _hcsInitializeWritableLayer(writableLayerPath *uint16, layerData *uint16, options *uint16) (hr error) { + if hr = procHcsInitializeWritableLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsInitializeWritableLayer.Addr(), 3, uintptr(unsafe.Pointer(writableLayerPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + return _hcsAttachLayerStorageFilter(_p0, _p1) +} + +func _hcsAttachLayerStorageFilter(layerPath *uint16, layerData *uint16) (hr error) { + if hr = procHcsAttachLayerStorageFilter.Find(); hr != nil { + return + } + r0, _, _ := 
syscall.Syscall(procHcsAttachLayerStorageFilter.Addr(), 2, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(layerData)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsDetachLayerStorageFilter(layerPath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + return _hcsDetachLayerStorageFilter(_p0) +} + +func _hcsDetachLayerStorageFilter(layerPath *uint16) (hr error) { + if hr = procHcsDetachLayerStorageFilter.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsDetachLayerStorageFilter.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) { + if hr = procHcsFormatWritableLayerVhd.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsFormatWritableLayerVhd.Addr(), 1, uintptr(handle), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) { + if hr = procHcsGetLayerVhdMountPath.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsGetLayerVhdMountPath.Addr(), 2, uintptr(vhdHandle), uintptr(unsafe.Pointer(mountPath)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(volumePath) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsSetupBaseOSVolume(_p0, _p1, _p2) +} + +func _hcsSetupBaseOSVolume(layerPath *uint16, volumePath *uint16, options *uint16) (hr error) { + if hr = procHcsSetupBaseOSVolume.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsSetupBaseOSVolume.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(options))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go new file mode 100644 index 00000000000..df3a59a78cb --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go @@ -0,0 +1,328 @@ +// Package hcn is a shim for the Host Compute Networking (HCN) service, which manages networking for Windows Server +// containers and Hyper-V containers. Previous to RS5, HCN was referred to as Host Networking Service (HNS). +package hcn + +import ( + "encoding/json" + "fmt" + "syscall" + + "github.com/Microsoft/go-winio/pkg/guid" +) + +//go:generate go run ../mksyscall_windows.go -output zsyscall_windows.go hcn.go + +/// HNS V1 API + +//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId +//sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall? 
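These //sys directives are consumed by the mksyscall go:generate step to produce the zsyscall_windows.go bindings seen earlier in this patch; the trailing '?' marks exports that may be absent on older Windows builds, which is why the generated wrappers call Find() and return an error instead of panicking. A small, hedged illustration of the same lazy-DLL pattern, using a well-known kernel32 export rather than anything from this patch (not part of the vendored files):

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	// Resolve the DLL lazily, the same way the vendored files resolve
	// virtdisk.dll, computestorage.dll and computenetwork.dll.
	kernel32 := windows.NewLazySystemDLL("kernel32.dll")
	proc := kernel32.NewProc("GetTickCount64")

	// Find reports an error instead of panicking when the export is missing,
	// matching what the '?' suffix asks the generated wrappers to do.
	if err := proc.Find(); err != nil {
		log.Fatalf("GetTickCount64 not available: %v", err)
	}

	// On 64-bit Windows the full tick count is returned in the first value.
	ticks, _, _ := proc.Call()
	fmt.Println("milliseconds since boot:", ticks)
}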
+ +/// HCN V2 API + +// Network +//sys hcnEnumerateNetworks(query string, networks **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateNetworks? +//sys hcnCreateNetwork(id *_guid, settings string, network *hcnNetwork, result **uint16) (hr error) = computenetwork.HcnCreateNetwork? +//sys hcnOpenNetwork(id *_guid, network *hcnNetwork, result **uint16) (hr error) = computenetwork.HcnOpenNetwork? +//sys hcnModifyNetwork(network hcnNetwork, settings string, result **uint16) (hr error) = computenetwork.HcnModifyNetwork? +//sys hcnQueryNetworkProperties(network hcnNetwork, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryNetworkProperties? +//sys hcnDeleteNetwork(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteNetwork? +//sys hcnCloseNetwork(network hcnNetwork) (hr error) = computenetwork.HcnCloseNetwork? + +// Endpoint +//sys hcnEnumerateEndpoints(query string, endpoints **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateEndpoints? +//sys hcnCreateEndpoint(network hcnNetwork, id *_guid, settings string, endpoint *hcnEndpoint, result **uint16) (hr error) = computenetwork.HcnCreateEndpoint? +//sys hcnOpenEndpoint(id *_guid, endpoint *hcnEndpoint, result **uint16) (hr error) = computenetwork.HcnOpenEndpoint? +//sys hcnModifyEndpoint(endpoint hcnEndpoint, settings string, result **uint16) (hr error) = computenetwork.HcnModifyEndpoint? +//sys hcnQueryEndpointProperties(endpoint hcnEndpoint, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryEndpointProperties? +//sys hcnDeleteEndpoint(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteEndpoint? +//sys hcnCloseEndpoint(endpoint hcnEndpoint) (hr error) = computenetwork.HcnCloseEndpoint? + +// Namespace +//sys hcnEnumerateNamespaces(query string, namespaces **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateNamespaces? +//sys hcnCreateNamespace(id *_guid, settings string, namespace *hcnNamespace, result **uint16) (hr error) = computenetwork.HcnCreateNamespace? +//sys hcnOpenNamespace(id *_guid, namespace *hcnNamespace, result **uint16) (hr error) = computenetwork.HcnOpenNamespace? +//sys hcnModifyNamespace(namespace hcnNamespace, settings string, result **uint16) (hr error) = computenetwork.HcnModifyNamespace? +//sys hcnQueryNamespaceProperties(namespace hcnNamespace, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryNamespaceProperties? +//sys hcnDeleteNamespace(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteNamespace? +//sys hcnCloseNamespace(namespace hcnNamespace) (hr error) = computenetwork.HcnCloseNamespace? + +// LoadBalancer +//sys hcnEnumerateLoadBalancers(query string, loadBalancers **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateLoadBalancers? +//sys hcnCreateLoadBalancer(id *_guid, settings string, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) = computenetwork.HcnCreateLoadBalancer? +//sys hcnOpenLoadBalancer(id *_guid, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) = computenetwork.HcnOpenLoadBalancer? +//sys hcnModifyLoadBalancer(loadBalancer hcnLoadBalancer, settings string, result **uint16) (hr error) = computenetwork.HcnModifyLoadBalancer? +//sys hcnQueryLoadBalancerProperties(loadBalancer hcnLoadBalancer, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryLoadBalancerProperties? 
+//sys hcnDeleteLoadBalancer(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteLoadBalancer? +//sys hcnCloseLoadBalancer(loadBalancer hcnLoadBalancer) (hr error) = computenetwork.HcnCloseLoadBalancer? + +// SDN Routes +//sys hcnEnumerateRoutes(query string, routes **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateSdnRoutes? +//sys hcnCreateRoute(id *_guid, settings string, route *hcnRoute, result **uint16) (hr error) = computenetwork.HcnCreateSdnRoute? +//sys hcnOpenRoute(id *_guid, route *hcnRoute, result **uint16) (hr error) = computenetwork.HcnOpenSdnRoute? +//sys hcnModifyRoute(route hcnRoute, settings string, result **uint16) (hr error) = computenetwork.HcnModifySdnRoute? +//sys hcnQueryRouteProperties(route hcnRoute, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQuerySdnRouteProperties? +//sys hcnDeleteRoute(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteSdnRoute? +//sys hcnCloseRoute(route hcnRoute) (hr error) = computenetwork.HcnCloseSdnRoute? + +type _guid = guid.GUID + +type hcnNetwork syscall.Handle +type hcnEndpoint syscall.Handle +type hcnNamespace syscall.Handle +type hcnLoadBalancer syscall.Handle +type hcnRoute syscall.Handle + +// SchemaVersion for HCN Objects/Queries. +type SchemaVersion = Version // hcnglobals.go + +// HostComputeQueryFlags are passed in to a HostComputeQuery to determine which +// properties of an object are returned. +type HostComputeQueryFlags uint32 + +var ( + // HostComputeQueryFlagsNone returns an object with the standard properties. + HostComputeQueryFlagsNone HostComputeQueryFlags + // HostComputeQueryFlagsDetailed returns an object with all properties. + HostComputeQueryFlagsDetailed HostComputeQueryFlags = 1 +) + +// HostComputeQuery is the format for HCN queries. +type HostComputeQuery struct { + SchemaVersion SchemaVersion `json:""` + Flags HostComputeQueryFlags `json:",omitempty"` + Filter string `json:",omitempty"` +} + +type ExtraParams struct { + Resources json.RawMessage `json:",omitempty"` + SharedContainers json.RawMessage `json:",omitempty"` + LayeredOn string `json:",omitempty"` + SwitchGuid string `json:",omitempty"` + UtilityVM string `json:",omitempty"` + VirtualMachine string `json:",omitempty"` +} + +type Health struct { + Data interface{} `json:",omitempty"` + Extra ExtraParams `json:",omitempty"` +} + +// defaultQuery generates HCN Query. +// Passed into get/enumerate calls to filter results. +func defaultQuery() HostComputeQuery { + query := HostComputeQuery{ + SchemaVersion: SchemaVersion{ + Major: 2, + Minor: 0, + }, + Flags: HostComputeQueryFlagsNone, + } + return query +} + +// PlatformDoesNotSupportError happens when users are attempting to use a newer shim on an older OS +func platformDoesNotSupportError(featureName string) error { + return fmt.Errorf("platform does not support feature %s", featureName) +} + +// V2ApiSupported returns an error if the HCN version does not support the V2 Apis. +func V2ApiSupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.Api.V2 { + return nil + } + return platformDoesNotSupportError("V2 Api/Schema") +} + +func V2SchemaVersion() SchemaVersion { + return SchemaVersion{ + Major: 2, + Minor: 0, + } +} + +// RemoteSubnetSupported returns an error if the HCN version does not support Remote Subnet policies. 
+func RemoteSubnetSupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.RemoteSubnet { + return nil + } + return platformDoesNotSupportError("Remote Subnet") +} + +// HostRouteSupported returns an error if the HCN version does not support Host Route policies. +func HostRouteSupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.HostRoute { + return nil + } + return platformDoesNotSupportError("Host Route") +} + +// DSRSupported returns an error if the HCN version does not support Direct Server Return. +func DSRSupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.DSR { + return nil + } + return platformDoesNotSupportError("Direct Server Return (DSR)") +} + +// Slash32EndpointPrefixesSupported returns an error if the HCN version does not support configuring endpoints with /32 prefixes. +func Slash32EndpointPrefixesSupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.Slash32EndpointPrefixes { + return nil + } + return platformDoesNotSupportError("Slash 32 Endpoint prefixes") +} + +// AclSupportForProtocol252Supported returns an error if the HCN version does not support HNS ACL Policies to support protocol 252 for VXLAN. +func AclSupportForProtocol252Supported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.AclSupportForProtocol252 { + return nil + } + return platformDoesNotSupportError("HNS ACL Policies to support protocol 252 for VXLAN") +} + +// SessionAffinitySupported returns an error if the HCN version does not support Session Affinity. +func SessionAffinitySupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.SessionAffinity { + return nil + } + return platformDoesNotSupportError("Session Affinity") +} + +// IPv6DualStackSupported returns an error if the HCN version does not support IPv6DualStack. +func IPv6DualStackSupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.IPv6DualStack { + return nil + } + return platformDoesNotSupportError("IPv6 DualStack") +} + +//L4proxySupported returns an error if the HCN verison does not support L4Proxy +func L4proxyPolicySupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.L4Proxy { + return nil + } + return platformDoesNotSupportError("L4ProxyPolicy") +} + +// L4WfpProxySupported returns an error if the HCN verison does not support L4WfpProxy +func L4WfpProxyPolicySupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.L4WfpProxy { + return nil + } + return platformDoesNotSupportError("L4WfpProxyPolicy") +} + +// SetPolicySupported returns an error if the HCN version does not support SetPolicy. +func SetPolicySupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.SetPolicy { + return nil + } + return platformDoesNotSupportError("SetPolicy") +} + +// VxlanPortSupported returns an error if the HCN version does not support configuring the VXLAN TCP port. 
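A minimal sketch of how these *Supported helpers are intended to be used: call them before attempting the corresponding HCN operation so the caller fails fast with a clear message rather than with a raw HCN error.

package hcnexamples

import (
	"fmt"

	"github.com/Microsoft/hcsshim/hcn"
)

// requireSetPolicy returns early if the running HNS build cannot honor
// SetPolicy-based configuration.
func requireSetPolicy() error {
	if err := hcn.V2ApiSupported(); err != nil {
		return fmt.Errorf("HCN V2 API unavailable: %w", err)
	}
	if err := hcn.SetPolicySupported(); err != nil {
		return fmt.Errorf("SetPolicy unavailable: %w", err)
	}
	return nil
}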
+func VxlanPortSupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.VxlanPort { + return nil + } + return platformDoesNotSupportError("VXLAN port configuration") +} + +// TierAclPolicySupported returns an error if the HCN version does not support configuring the TierAcl. +func TierAclPolicySupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.TierAcl { + return nil + } + return platformDoesNotSupportError("TierAcl") +} + +// NetworkACLPolicySupported returns an error if the HCN version does not support NetworkACLPolicy +func NetworkACLPolicySupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.NetworkACL { + return nil + } + return platformDoesNotSupportError("NetworkACL") +} + +// NestedIpSetSupported returns an error if the HCN version does not support NestedIpSet +func NestedIpSetSupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.NestedIpSet { + return nil + } + return platformDoesNotSupportError("NestedIpSet") +} + +// RequestType are the different operations performed to settings. +// Used to update the settings of Endpoint/Namespace objects. +type RequestType string + +var ( + // RequestTypeAdd adds the provided settings object. + RequestTypeAdd RequestType = "Add" + // RequestTypeRemove removes the provided settings object. + RequestTypeRemove RequestType = "Remove" + // RequestTypeUpdate replaces settings with the ones provided. + RequestTypeUpdate RequestType = "Update" + // RequestTypeRefresh refreshes the settings provided. + RequestTypeRefresh RequestType = "Refresh" +) diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go new file mode 100644 index 00000000000..545e8639d6c --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go @@ -0,0 +1,388 @@ +package hcn + +import ( + "encoding/json" + "errors" + + "github.com/Microsoft/go-winio/pkg/guid" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/sirupsen/logrus" +) + +// IpConfig is assoicated with an endpoint +type IpConfig struct { + IpAddress string `json:",omitempty"` + PrefixLength uint8 `json:",omitempty"` +} + +// EndpointFlags are special settings on an endpoint. +type EndpointFlags uint32 + +var ( + // EndpointFlagsNone is the default. + EndpointFlagsNone EndpointFlags + // EndpointFlagsRemoteEndpoint means that an endpoint is on another host. + EndpointFlagsRemoteEndpoint EndpointFlags = 1 +) + +// HostComputeEndpoint represents a network endpoint +type HostComputeEndpoint struct { + Id string `json:"ID,omitempty"` + Name string `json:",omitempty"` + HostComputeNetwork string `json:",omitempty"` // GUID + HostComputeNamespace string `json:",omitempty"` // GUID + Policies []EndpointPolicy `json:",omitempty"` + IpConfigurations []IpConfig `json:",omitempty"` + Dns Dns `json:",omitempty"` + Routes []Route `json:",omitempty"` + MacAddress string `json:",omitempty"` + Flags EndpointFlags `json:",omitempty"` + Health Health `json:",omitempty"` + SchemaVersion SchemaVersion `json:",omitempty"` +} + +// EndpointResourceType are the two different Endpoint settings resources. +type EndpointResourceType string + +var ( + // EndpointResourceTypePolicy is for Endpoint Policies. 
Ex: ACL, NAT + EndpointResourceTypePolicy EndpointResourceType = "Policy" + // EndpointResourceTypePort is for Endpoint Port settings. + EndpointResourceTypePort EndpointResourceType = "Port" +) + +// ModifyEndpointSettingRequest is the structure used to send request to modify an endpoint. +// Used to update policy/port on an endpoint. +type ModifyEndpointSettingRequest struct { + ResourceType EndpointResourceType `json:",omitempty"` // Policy, Port + RequestType RequestType `json:",omitempty"` // Add, Remove, Update, Refresh + Settings json.RawMessage `json:",omitempty"` +} + +// VmEndpointRequest creates a switch port with identifier `PortId`. +type VmEndpointRequest struct { + PortId guid.GUID `json:",omitempty"` + VirtualNicName string `json:",omitempty"` + VirtualMachineId guid.GUID `json:",omitempty"` +} + +type PolicyEndpointRequest struct { + Policies []EndpointPolicy `json:",omitempty"` +} + +func getEndpoint(endpointGuid guid.GUID, query string) (*HostComputeEndpoint, error) { + // Open endpoint. + var ( + endpointHandle hcnEndpoint + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + hr := hcnOpenEndpoint(&endpointGuid, &endpointHandle, &resultBuffer) + if err := checkForErrors("hcnOpenEndpoint", hr, resultBuffer); err != nil { + return nil, err + } + // Query endpoint. + hr = hcnQueryEndpointProperties(endpointHandle, query, &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryEndpointProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close endpoint. + hr = hcnCloseEndpoint(endpointHandle) + if err := checkForErrors("hcnCloseEndpoint", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeEndpoint + var outputEndpoint HostComputeEndpoint + if err := json.Unmarshal([]byte(properties), &outputEndpoint); err != nil { + return nil, err + } + return &outputEndpoint, nil +} + +func enumerateEndpoints(query string) ([]HostComputeEndpoint, error) { + // Enumerate all Endpoint Guids + var ( + resultBuffer *uint16 + endpointBuffer *uint16 + ) + hr := hcnEnumerateEndpoints(query, &endpointBuffer, &resultBuffer) + if err := checkForErrors("hcnEnumerateEndpoints", hr, resultBuffer); err != nil { + return nil, err + } + + endpoints := interop.ConvertAndFreeCoTaskMemString(endpointBuffer) + var endpointIds []guid.GUID + err := json.Unmarshal([]byte(endpoints), &endpointIds) + if err != nil { + return nil, err + } + + var outputEndpoints []HostComputeEndpoint + for _, endpointGuid := range endpointIds { + endpoint, err := getEndpoint(endpointGuid, query) + if err != nil { + return nil, err + } + outputEndpoints = append(outputEndpoints, *endpoint) + } + return outputEndpoints, nil +} + +func createEndpoint(networkId string, endpointSettings string) (*HostComputeEndpoint, error) { + networkGuid, err := guid.FromString(networkId) + if err != nil { + return nil, errInvalidNetworkID + } + // Open network. + var networkHandle hcnNetwork + var resultBuffer *uint16 + hr := hcnOpenNetwork(&networkGuid, &networkHandle, &resultBuffer) + if err := checkForErrors("hcnOpenNetwork", hr, resultBuffer); err != nil { + return nil, err + } + // Create endpoint. + endpointId := guid.GUID{} + var endpointHandle hcnEndpoint + hr = hcnCreateEndpoint(networkHandle, &endpointId, endpointSettings, &endpointHandle, &resultBuffer) + if err := checkForErrors("hcnCreateEndpoint", hr, resultBuffer); err != nil { + return nil, err + } + // Query endpoint. 
+ hcnQuery := defaultQuery() + query, err := json.Marshal(hcnQuery) + if err != nil { + return nil, err + } + var propertiesBuffer *uint16 + hr = hcnQueryEndpointProperties(endpointHandle, string(query), &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryEndpointProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close endpoint. + hr = hcnCloseEndpoint(endpointHandle) + if err := checkForErrors("hcnCloseEndpoint", hr, nil); err != nil { + return nil, err + } + // Close network. + hr = hcnCloseNetwork(networkHandle) + if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeEndpoint + var outputEndpoint HostComputeEndpoint + if err := json.Unmarshal([]byte(properties), &outputEndpoint); err != nil { + return nil, err + } + return &outputEndpoint, nil +} + +func modifyEndpoint(endpointId string, settings string) (*HostComputeEndpoint, error) { + endpointGuid, err := guid.FromString(endpointId) + if err != nil { + return nil, errInvalidEndpointID + } + // Open endpoint + var ( + endpointHandle hcnEndpoint + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + hr := hcnOpenEndpoint(&endpointGuid, &endpointHandle, &resultBuffer) + if err := checkForErrors("hcnOpenEndpoint", hr, resultBuffer); err != nil { + return nil, err + } + // Modify endpoint + hr = hcnModifyEndpoint(endpointHandle, settings, &resultBuffer) + if err := checkForErrors("hcnModifyEndpoint", hr, resultBuffer); err != nil { + return nil, err + } + // Query endpoint. + hcnQuery := defaultQuery() + query, err := json.Marshal(hcnQuery) + if err != nil { + return nil, err + } + hr = hcnQueryEndpointProperties(endpointHandle, string(query), &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryEndpointProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close endpoint. + hr = hcnCloseEndpoint(endpointHandle) + if err := checkForErrors("hcnCloseEndpoint", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeEndpoint + var outputEndpoint HostComputeEndpoint + if err := json.Unmarshal([]byte(properties), &outputEndpoint); err != nil { + return nil, err + } + return &outputEndpoint, nil +} + +func deleteEndpoint(endpointId string) error { + endpointGuid, err := guid.FromString(endpointId) + if err != nil { + return errInvalidEndpointID + } + var resultBuffer *uint16 + hr := hcnDeleteEndpoint(&endpointGuid, &resultBuffer) + if err := checkForErrors("hcnDeleteEndpoint", hr, resultBuffer); err != nil { + return err + } + return nil +} + +// ListEndpoints makes a call to list all available endpoints. +func ListEndpoints() ([]HostComputeEndpoint, error) { + hcnQuery := defaultQuery() + endpoints, err := ListEndpointsQuery(hcnQuery) + if err != nil { + return nil, err + } + return endpoints, nil +} + +// ListEndpointsQuery makes a call to query the list of available endpoints. +func ListEndpointsQuery(query HostComputeQuery) ([]HostComputeEndpoint, error) { + queryJson, err := json.Marshal(query) + if err != nil { + return nil, err + } + + endpoints, err := enumerateEndpoints(string(queryJson)) + if err != nil { + return nil, err + } + return endpoints, nil +} + +// ListEndpointsOfNetwork queries the list of endpoints on a network. 
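Illustrative only: a caller-side use of ListEndpointsQuery with a filtered query, following the same marshal-a-map pattern the helpers below (ListEndpointsOfNetwork, GetEndpointByName) use internally. The "Name" filter key mirrors those helpers.

package hcnexamples

import (
	"encoding/json"

	"github.com/Microsoft/hcsshim/hcn"
)

// endpointsNamed lists endpoints whose Name matches the given value.
func endpointsNamed(name string) ([]hcn.HostComputeEndpoint, error) {
	filter, err := json.Marshal(map[string]string{"Name": name})
	if err != nil {
		return nil, err
	}
	query := hcn.HostComputeQuery{
		SchemaVersion: hcn.V2SchemaVersion(),
		Filter:        string(filter),
	}
	return hcn.ListEndpointsQuery(query)
}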
+func ListEndpointsOfNetwork(networkId string) ([]HostComputeEndpoint, error) { + hcnQuery := defaultQuery() + // TODO: Once query can convert schema, change to {HostComputeNetwork:networkId} + mapA := map[string]string{"VirtualNetwork": networkId} + filter, err := json.Marshal(mapA) + if err != nil { + return nil, err + } + hcnQuery.Filter = string(filter) + + return ListEndpointsQuery(hcnQuery) +} + +// GetEndpointByID returns an endpoint specified by Id +func GetEndpointByID(endpointId string) (*HostComputeEndpoint, error) { + hcnQuery := defaultQuery() + mapA := map[string]string{"ID": endpointId} + filter, err := json.Marshal(mapA) + if err != nil { + return nil, err + } + hcnQuery.Filter = string(filter) + + endpoints, err := ListEndpointsQuery(hcnQuery) + if err != nil { + return nil, err + } + if len(endpoints) == 0 { + return nil, EndpointNotFoundError{EndpointID: endpointId} + } + return &endpoints[0], err +} + +// GetEndpointByName returns an endpoint specified by Name +func GetEndpointByName(endpointName string) (*HostComputeEndpoint, error) { + hcnQuery := defaultQuery() + mapA := map[string]string{"Name": endpointName} + filter, err := json.Marshal(mapA) + if err != nil { + return nil, err + } + hcnQuery.Filter = string(filter) + + endpoints, err := ListEndpointsQuery(hcnQuery) + if err != nil { + return nil, err + } + if len(endpoints) == 0 { + return nil, EndpointNotFoundError{EndpointName: endpointName} + } + return &endpoints[0], err +} + +// Create Endpoint. +func (endpoint *HostComputeEndpoint) Create() (*HostComputeEndpoint, error) { + logrus.Debugf("hcn::HostComputeEndpoint::Create id=%s", endpoint.Id) + + if endpoint.HostComputeNamespace != "" { + return nil, errors.New("endpoint create error, endpoint json HostComputeNamespace is read only and should not be set") + } + + jsonString, err := json.Marshal(endpoint) + if err != nil { + return nil, err + } + + logrus.Debugf("hcn::HostComputeEndpoint::Create JSON: %s", jsonString) + endpoint, hcnErr := createEndpoint(endpoint.HostComputeNetwork, string(jsonString)) + if hcnErr != nil { + return nil, hcnErr + } + return endpoint, nil +} + +// Delete Endpoint. +func (endpoint *HostComputeEndpoint) Delete() error { + logrus.Debugf("hcn::HostComputeEndpoint::Delete id=%s", endpoint.Id) + + if err := deleteEndpoint(endpoint.Id); err != nil { + return err + } + return nil +} + +// ModifyEndpointSettings updates the Port/Policy of an Endpoint. +func ModifyEndpointSettings(endpointId string, request *ModifyEndpointSettingRequest) error { + logrus.Debugf("hcn::HostComputeEndpoint::ModifyEndpointSettings id=%s", endpointId) + + endpointSettingsRequest, err := json.Marshal(request) + if err != nil { + return err + } + + _, err = modifyEndpoint(endpointId, string(endpointSettingsRequest)) + if err != nil { + return err + } + return nil +} + +// ApplyPolicy applies a Policy (ex: ACL) on the Endpoint. +func (endpoint *HostComputeEndpoint) ApplyPolicy(requestType RequestType, endpointPolicy PolicyEndpointRequest) error { + logrus.Debugf("hcn::HostComputeEndpoint::ApplyPolicy id=%s", endpoint.Id) + + settingsJson, err := json.Marshal(endpointPolicy) + if err != nil { + return err + } + requestMessage := &ModifyEndpointSettingRequest{ + ResourceType: EndpointResourceTypePolicy, + RequestType: requestType, + Settings: settingsJson, + } + + return ModifyEndpointSettings(endpoint.Id, requestMessage) +} + +// NamespaceAttach modifies a Namespace to add an endpoint. 
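A sketch of the endpoint lifecycle the methods above expose: create the endpoint on an existing network, then attach it to a compartment in a second step, since HostComputeNamespace is rejected at create time. networkID and namespaceID are placeholder GUID strings.

package hcnexamples

import "github.com/Microsoft/hcsshim/hcn"

// attachNewEndpoint creates an endpoint on an existing network and binds it to
// a namespace.
func attachNewEndpoint(networkID, namespaceID, name string) (*hcn.HostComputeEndpoint, error) {
	endpoint := &hcn.HostComputeEndpoint{
		Name:               name,
		HostComputeNetwork: networkID,
		SchemaVersion:      hcn.V2SchemaVersion(),
	}
	created, err := endpoint.Create()
	if err != nil {
		return nil, err
	}
	// The namespace binding is a separate modify call; NamespaceAttach wraps
	// AddNamespaceEndpoint.
	if err := created.NamespaceAttach(namespaceID); err != nil {
		_ = created.Delete() // best-effort cleanup of the half-created endpoint
		return nil, err
	}
	return created, nil
}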
+func (endpoint *HostComputeEndpoint) NamespaceAttach(namespaceId string) error { + return AddNamespaceEndpoint(namespaceId, endpoint.Id) +} + +// NamespaceDetach modifies a Namespace to remove an endpoint. +func (endpoint *HostComputeEndpoint) NamespaceDetach(namespaceId string) error { + return RemoveNamespaceEndpoint(namespaceId, endpoint.Id) +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go new file mode 100644 index 00000000000..ad30d320d97 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go @@ -0,0 +1,164 @@ +// Package hcn is a shim for the Host Compute Networking (HCN) service, which manages networking for Windows Server +// containers and Hyper-V containers. Previous to RS5, HCN was referred to as Host Networking Service (HNS). +package hcn + +import ( + "errors" + "fmt" + + "github.com/Microsoft/hcsshim/internal/hcs" + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/sirupsen/logrus" +) + +var ( + errInvalidNetworkID = errors.New("invalid network ID") + errInvalidEndpointID = errors.New("invalid endpoint ID") + errInvalidNamespaceID = errors.New("invalid namespace ID") + errInvalidLoadBalancerID = errors.New("invalid load balancer ID") + errInvalidRouteID = errors.New("invalid route ID") +) + +func checkForErrors(methodName string, hr error, resultBuffer *uint16) error { + errorFound := false + + if hr != nil { + errorFound = true + } + + result := "" + if resultBuffer != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultBuffer) + if result != "" { + errorFound = true + } + } + + if errorFound { + returnError := new(hr, methodName, result) + logrus.Debugf(returnError.Error()) // HCN errors logged for debugging. 
+ return returnError + } + + return nil +} + +type ErrorCode uint32 + +// For common errors, define the error as it is in windows, so we can quickly determine it later +const ( + ERROR_NOT_FOUND = 0x490 + HCN_E_PORT_ALREADY_EXISTS ErrorCode = 0x803b0013 +) + +type HcnError struct { + *hcserror.HcsError + code ErrorCode +} + +func (e *HcnError) Error() string { + return e.HcsError.Error() +} + +func CheckErrorWithCode(err error, code ErrorCode) bool { + hcnError, ok := err.(*HcnError) + if ok { + return hcnError.code == code + } + return false +} + +func IsElementNotFoundError(err error) bool { + return CheckErrorWithCode(err, ERROR_NOT_FOUND) +} + +func IsPortAlreadyExistsError(err error) bool { + return CheckErrorWithCode(err, HCN_E_PORT_ALREADY_EXISTS) +} + +func new(hr error, title string, rest string) error { + err := &HcnError{} + hcsError := hcserror.New(hr, title, rest) + err.HcsError = hcsError.(*hcserror.HcsError) + err.code = ErrorCode(hcserror.Win32FromError(hr)) + return err +} + +// +// Note that the below errors are not errors returned by hcn itself +// we wish to seperate them as they are shim usage error +// + +// NetworkNotFoundError results from a failed seach for a network by Id or Name +type NetworkNotFoundError struct { + NetworkName string + NetworkID string +} + +func (e NetworkNotFoundError) Error() string { + if e.NetworkName != "" { + return fmt.Sprintf("Network name %q not found", e.NetworkName) + } + return fmt.Sprintf("Network ID %q not found", e.NetworkID) +} + +// EndpointNotFoundError results from a failed seach for an endpoint by Id or Name +type EndpointNotFoundError struct { + EndpointName string + EndpointID string +} + +func (e EndpointNotFoundError) Error() string { + if e.EndpointName != "" { + return fmt.Sprintf("Endpoint name %q not found", e.EndpointName) + } + return fmt.Sprintf("Endpoint ID %q not found", e.EndpointID) +} + +// NamespaceNotFoundError results from a failed seach for a namsepace by Id +type NamespaceNotFoundError struct { + NamespaceID string +} + +func (e NamespaceNotFoundError) Error() string { + return fmt.Sprintf("Namespace ID %q not found", e.NamespaceID) +} + +// LoadBalancerNotFoundError results from a failed seach for a loadbalancer by Id +type LoadBalancerNotFoundError struct { + LoadBalancerId string +} + +func (e LoadBalancerNotFoundError) Error() string { + return fmt.Sprintf("LoadBalancer %q not found", e.LoadBalancerId) +} + +// RouteNotFoundError results from a failed seach for a route by Id +type RouteNotFoundError struct { + RouteId string +} + +func (e RouteNotFoundError) Error() string { + return fmt.Sprintf("SDN Route %q not found", e.RouteId) +} + +// IsNotFoundError returns a boolean indicating whether the error was caused by +// a resource not being found. 
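A short caller-side sketch of how the error helpers in this file are typically used: treat "not found" as success so cleanup stays idempotent, and surface every other HCN error.

package hcnexamples

import "github.com/Microsoft/hcsshim/hcn"

// deleteEndpointIfPresent deletes an endpoint, ignoring the case where it no
// longer exists.
func deleteEndpointIfPresent(endpointID string) error {
	endpoint, err := hcn.GetEndpointByID(endpointID)
	if err != nil {
		if hcn.IsNotFoundError(err) {
			return nil // already gone
		}
		return err
	}
	return endpoint.Delete()
}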
+func IsNotFoundError(err error) bool { + switch pe := err.(type) { + case NetworkNotFoundError: + return true + case EndpointNotFoundError: + return true + case NamespaceNotFoundError: + return true + case LoadBalancerNotFoundError: + return true + case RouteNotFoundError: + return true + case *hcserror.HcsError: + return pe.Err == hcs.ErrElementNotFound + } + return false +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go new file mode 100644 index 00000000000..14903bc5e91 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go @@ -0,0 +1,138 @@ +package hcn + +import ( + "encoding/json" + "fmt" + "math" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/sirupsen/logrus" +) + +// Globals are all global properties of the HCN Service. +type Globals struct { + Version Version `json:"Version"` +} + +// Version is the HCN Service version. +type Version struct { + Major int `json:"Major"` + Minor int `json:"Minor"` +} + +type VersionRange struct { + MinVersion Version + MaxVersion Version +} + +type VersionRanges []VersionRange + +var ( + // HNSVersion1803 added ACL functionality. + HNSVersion1803 = VersionRanges{VersionRange{MinVersion: Version{Major: 7, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + // V2ApiSupport allows the use of V2 Api calls and V2 Schema. + V2ApiSupport = VersionRanges{VersionRange{MinVersion: Version{Major: 9, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + // Remote Subnet allows for Remote Subnet policies on Overlay networks + RemoteSubnetVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 9, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + // A Host Route policy allows for local container to local host communication Overlay networks + HostRouteVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 9, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + // HNS 9.3 through 10.0 (not included), and 10.2+ allows for Direct Server Return for loadbalancing + DSRVersion = VersionRanges{ + VersionRange{MinVersion: Version{Major: 9, Minor: 3}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}}, + VersionRange{MinVersion: Version{Major: 10, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, + } + // HNS 9.3 through 10.0 (not included) and, 10.4+ provide support for configuring endpoints with /32 prefixes + Slash32EndpointPrefixesVersion = VersionRanges{ + VersionRange{MinVersion: Version{Major: 9, Minor: 3}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}}, + VersionRange{MinVersion: Version{Major: 10, Minor: 4}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, + } + // HNS 9.3 through 10.0 (not included) and, 10.4+ allow for HNS ACL Policies to support protocol 252 for VXLAN + AclSupportForProtocol252Version = VersionRanges{ + VersionRange{MinVersion: Version{Major: 11, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, + } + // HNS 12.0 allows for session affinity for loadbalancing + SessionAffinityVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 12, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + // HNS 11.10+ supports Ipv6 dual stack. 
+ IPv6DualStackVersion = VersionRanges{ + VersionRange{MinVersion: Version{Major: 11, Minor: 10}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, + } + // HNS 13.0 allows for Set Policy support + SetPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 13, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + // HNS 10.3 allows for VXLAN ports + VxlanPortVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 10, Minor: 3}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + + //HNS 9.5 through 10.0(not included), 10.5 through 11.0(not included), 11.11 through 12.0(not included), 12.1 through 13.0(not included), 13.1+ allows for Network L4Proxy Policy support + L4ProxyPolicyVersion = VersionRanges{ + VersionRange{MinVersion: Version{Major: 9, Minor: 5}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}}, + VersionRange{MinVersion: Version{Major: 10, Minor: 5}, MaxVersion: Version{Major: 10, Minor: math.MaxInt32}}, + VersionRange{MinVersion: Version{Major: 11, Minor: 11}, MaxVersion: Version{Major: 11, Minor: math.MaxInt32}}, + VersionRange{MinVersion: Version{Major: 12, Minor: 1}, MaxVersion: Version{Major: 12, Minor: math.MaxInt32}}, + VersionRange{MinVersion: Version{Major: 13, Minor: 1}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, + } + + //HNS 13.2 allows for L4WfpProxy Policy support + L4WfpProxyPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 13, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + + //HNS 14.0 allows for TierAcl Policy support + TierAclPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 14, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + + //HNS 15.0 allows for NetworkACL Policy support + NetworkACLPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 15, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + + //HNS 15.0 allows for NestedIpSet support + NestedIpSetVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 15, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} +) + +// GetGlobals returns the global properties of the HCN Service. 
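The VersionRanges above are consumed elsewhere in the package (via GetCachedSupportedFeatures, used by the *Supported helpers earlier in this patch). The comparison below is an editorial sketch of the same shape of check, written here only to show how Version and VersionRange compose; it is not the library's own implementation. A caller could pair it with GetGlobals (defined just below), e.g. inRanges(globals.Version, hcn.SetPolicyVersion).

package hcnexamples

import "github.com/Microsoft/hcsshim/hcn"

// inRanges reports whether v falls inside any of the given version ranges.
func inRanges(v hcn.Version, ranges hcn.VersionRanges) bool {
	for _, r := range ranges {
		if greaterEqual(v, r.MinVersion) && greaterEqual(r.MaxVersion, v) {
			return true
		}
	}
	return false
}

// greaterEqual compares versions by Major, then Minor.
func greaterEqual(a, b hcn.Version) bool {
	if a.Major != b.Major {
		return a.Major > b.Major
	}
	return a.Minor >= b.Minor
}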
+func GetGlobals() (*Globals, error) { + var version Version + err := hnsCall("GET", "/globals/version", "", &version) + if err != nil { + return nil, err + } + + globals := &Globals{ + Version: version, + } + + return globals, nil +} + +type hnsResponse struct { + Success bool + Error string + Output json.RawMessage +} + +func hnsCall(method, path, request string, returnResponse interface{}) error { + var responseBuffer *uint16 + logrus.Debugf("[%s]=>[%s] Request : %s", method, path, request) + + err := _hnsCall(method, path, request, &responseBuffer) + if err != nil { + return hcserror.New(err, "hnsCall", "") + } + response := interop.ConvertAndFreeCoTaskMemString(responseBuffer) + + hnsresponse := &hnsResponse{} + if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil { + return err + } + + if !hnsresponse.Success { + return fmt.Errorf("HNS failed with error : %s", hnsresponse.Error) + } + + if len(hnsresponse.Output) == 0 { + return nil + } + + logrus.Debugf("Network Response : %s", hnsresponse.Output) + err = json.Unmarshal(hnsresponse.Output, returnResponse) + if err != nil { + return err + } + + return nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go new file mode 100644 index 00000000000..1b434b07b3a --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go @@ -0,0 +1,311 @@ +package hcn + +import ( + "encoding/json" + + "github.com/Microsoft/go-winio/pkg/guid" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/sirupsen/logrus" +) + +// LoadBalancerPortMapping is associated with HostComputeLoadBalancer +type LoadBalancerPortMapping struct { + Protocol uint32 `json:",omitempty"` // EX: TCP = 6, UDP = 17 + InternalPort uint16 `json:",omitempty"` + ExternalPort uint16 `json:",omitempty"` + DistributionType LoadBalancerDistribution `json:",omitempty"` // EX: Distribute per connection = 0, distribute traffic of the same protocol per client IP = 1, distribute per client IP = 2 + Flags LoadBalancerPortMappingFlags `json:",omitempty"` +} + +// HostComputeLoadBalancer represents software load balancer. +type HostComputeLoadBalancer struct { + Id string `json:"ID,omitempty"` + HostComputeEndpoints []string `json:",omitempty"` + SourceVIP string `json:",omitempty"` + FrontendVIPs []string `json:",omitempty"` + PortMappings []LoadBalancerPortMapping `json:",omitempty"` + SchemaVersion SchemaVersion `json:",omitempty"` + Flags LoadBalancerFlags `json:",omitempty"` // 0: None, 1: EnableDirectServerReturn +} + +//LoadBalancerFlags modify settings for a loadbalancer. +type LoadBalancerFlags uint32 + +var ( + // LoadBalancerFlagsNone is the default. + LoadBalancerFlagsNone LoadBalancerFlags = 0 + // LoadBalancerFlagsDSR enables Direct Server Return (DSR) + LoadBalancerFlagsDSR LoadBalancerFlags = 1 + LoadBalancerFlagsIPv6 LoadBalancerFlags = 2 +) + +// LoadBalancerPortMappingFlags are special settings on a loadbalancer. +type LoadBalancerPortMappingFlags uint32 + +var ( + // LoadBalancerPortMappingFlagsNone is the default. + LoadBalancerPortMappingFlagsNone LoadBalancerPortMappingFlags + // LoadBalancerPortMappingFlagsILB enables internal loadbalancing. + LoadBalancerPortMappingFlagsILB LoadBalancerPortMappingFlags = 1 + // LoadBalancerPortMappingFlagsLocalRoutedVIP enables VIP access from the host. 
+ LoadBalancerPortMappingFlagsLocalRoutedVIP LoadBalancerPortMappingFlags = 2 + // LoadBalancerPortMappingFlagsUseMux enables DSR for NodePort access of VIP. + LoadBalancerPortMappingFlagsUseMux LoadBalancerPortMappingFlags = 4 + // LoadBalancerPortMappingFlagsPreserveDIP delivers packets with destination IP as the VIP. + LoadBalancerPortMappingFlagsPreserveDIP LoadBalancerPortMappingFlags = 8 +) + +// LoadBalancerDistribution specifies how the loadbalancer distributes traffic. +type LoadBalancerDistribution uint32 + +var ( + // LoadBalancerDistributionNone is the default and loadbalances each connection to the same pod. + LoadBalancerDistributionNone LoadBalancerDistribution + // LoadBalancerDistributionSourceIPProtocol loadbalances all traffic of the same protocol from a client IP to the same pod. + LoadBalancerDistributionSourceIPProtocol LoadBalancerDistribution = 1 + // LoadBalancerDistributionSourceIP loadbalances all traffic from a client IP to the same pod. + LoadBalancerDistributionSourceIP LoadBalancerDistribution = 2 +) + +func getLoadBalancer(loadBalancerGuid guid.GUID, query string) (*HostComputeLoadBalancer, error) { + // Open loadBalancer. + var ( + loadBalancerHandle hcnLoadBalancer + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + hr := hcnOpenLoadBalancer(&loadBalancerGuid, &loadBalancerHandle, &resultBuffer) + if err := checkForErrors("hcnOpenLoadBalancer", hr, resultBuffer); err != nil { + return nil, err + } + // Query loadBalancer. + hr = hcnQueryLoadBalancerProperties(loadBalancerHandle, query, &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryLoadBalancerProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close loadBalancer. + hr = hcnCloseLoadBalancer(loadBalancerHandle) + if err := checkForErrors("hcnCloseLoadBalancer", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeLoadBalancer + var outputLoadBalancer HostComputeLoadBalancer + if err := json.Unmarshal([]byte(properties), &outputLoadBalancer); err != nil { + return nil, err + } + return &outputLoadBalancer, nil +} + +func enumerateLoadBalancers(query string) ([]HostComputeLoadBalancer, error) { + // Enumerate all LoadBalancer Guids + var ( + resultBuffer *uint16 + loadBalancerBuffer *uint16 + ) + hr := hcnEnumerateLoadBalancers(query, &loadBalancerBuffer, &resultBuffer) + if err := checkForErrors("hcnEnumerateLoadBalancers", hr, resultBuffer); err != nil { + return nil, err + } + + loadBalancers := interop.ConvertAndFreeCoTaskMemString(loadBalancerBuffer) + var loadBalancerIds []guid.GUID + if err := json.Unmarshal([]byte(loadBalancers), &loadBalancerIds); err != nil { + return nil, err + } + + var outputLoadBalancers []HostComputeLoadBalancer + for _, loadBalancerGuid := range loadBalancerIds { + loadBalancer, err := getLoadBalancer(loadBalancerGuid, query) + if err != nil { + return nil, err + } + outputLoadBalancers = append(outputLoadBalancers, *loadBalancer) + } + return outputLoadBalancers, nil +} + +func createLoadBalancer(settings string) (*HostComputeLoadBalancer, error) { + // Create new loadBalancer. + var ( + loadBalancerHandle hcnLoadBalancer + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + loadBalancerGuid := guid.GUID{} + hr := hcnCreateLoadBalancer(&loadBalancerGuid, settings, &loadBalancerHandle, &resultBuffer) + if err := checkForErrors("hcnCreateLoadBalancer", hr, resultBuffer); err != nil { + return nil, err + } + // Query loadBalancer. 
+ hcnQuery := defaultQuery() + query, err := json.Marshal(hcnQuery) + if err != nil { + return nil, err + } + hr = hcnQueryLoadBalancerProperties(loadBalancerHandle, string(query), &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryLoadBalancerProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close loadBalancer. + hr = hcnCloseLoadBalancer(loadBalancerHandle) + if err := checkForErrors("hcnCloseLoadBalancer", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeLoadBalancer + var outputLoadBalancer HostComputeLoadBalancer + if err := json.Unmarshal([]byte(properties), &outputLoadBalancer); err != nil { + return nil, err + } + return &outputLoadBalancer, nil +} + +func deleteLoadBalancer(loadBalancerId string) error { + loadBalancerGuid, err := guid.FromString(loadBalancerId) + if err != nil { + return errInvalidLoadBalancerID + } + var resultBuffer *uint16 + hr := hcnDeleteLoadBalancer(&loadBalancerGuid, &resultBuffer) + if err := checkForErrors("hcnDeleteLoadBalancer", hr, resultBuffer); err != nil { + return err + } + return nil +} + +// ListLoadBalancers makes a call to list all available loadBalancers. +func ListLoadBalancers() ([]HostComputeLoadBalancer, error) { + hcnQuery := defaultQuery() + loadBalancers, err := ListLoadBalancersQuery(hcnQuery) + if err != nil { + return nil, err + } + return loadBalancers, nil +} + +// ListLoadBalancersQuery makes a call to query the list of available loadBalancers. +func ListLoadBalancersQuery(query HostComputeQuery) ([]HostComputeLoadBalancer, error) { + queryJson, err := json.Marshal(query) + if err != nil { + return nil, err + } + + loadBalancers, err := enumerateLoadBalancers(string(queryJson)) + if err != nil { + return nil, err + } + return loadBalancers, nil +} + +// GetLoadBalancerByID returns the LoadBalancer specified by Id. +func GetLoadBalancerByID(loadBalancerId string) (*HostComputeLoadBalancer, error) { + hcnQuery := defaultQuery() + mapA := map[string]string{"ID": loadBalancerId} + filter, err := json.Marshal(mapA) + if err != nil { + return nil, err + } + hcnQuery.Filter = string(filter) + + loadBalancers, err := ListLoadBalancersQuery(hcnQuery) + if err != nil { + return nil, err + } + if len(loadBalancers) == 0 { + return nil, LoadBalancerNotFoundError{LoadBalancerId: loadBalancerId} + } + return &loadBalancers[0], err +} + +// Create LoadBalancer. +func (loadBalancer *HostComputeLoadBalancer) Create() (*HostComputeLoadBalancer, error) { + logrus.Debugf("hcn::HostComputeLoadBalancer::Create id=%s", loadBalancer.Id) + + jsonString, err := json.Marshal(loadBalancer) + if err != nil { + return nil, err + } + + logrus.Debugf("hcn::HostComputeLoadBalancer::Create JSON: %s", jsonString) + loadBalancer, hcnErr := createLoadBalancer(string(jsonString)) + if hcnErr != nil { + return nil, hcnErr + } + return loadBalancer, nil +} + +// Delete LoadBalancer. 
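A sketch of building a load balancer directly from the types above; the AddLoadBalancer convenience wrapper defined just below does essentially the same. Protocol 6 is TCP, per the LoadBalancerPortMapping comment; vip and the port values are caller-supplied placeholders.

package hcnexamples

import "github.com/Microsoft/hcsshim/hcn"

// tcpVIP publishes a TCP VIP in front of the given endpoint IDs.
func tcpVIP(endpointIDs []string, vip string, internal, external uint16) (*hcn.HostComputeLoadBalancer, error) {
	lb := &hcn.HostComputeLoadBalancer{
		HostComputeEndpoints: endpointIDs,
		FrontendVIPs:         []string{vip},
		PortMappings: []hcn.LoadBalancerPortMapping{{
			Protocol:     6, // TCP
			InternalPort: internal,
			ExternalPort: external,
			Flags:        hcn.LoadBalancerPortMappingFlagsNone,
		}},
		SchemaVersion: hcn.V2SchemaVersion(),
		Flags:         hcn.LoadBalancerFlagsNone,
	}
	return lb.Create()
}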
+func (loadBalancer *HostComputeLoadBalancer) Delete() error { + logrus.Debugf("hcn::HostComputeLoadBalancer::Delete id=%s", loadBalancer.Id) + + if err := deleteLoadBalancer(loadBalancer.Id); err != nil { + return err + } + return nil +} + +// AddEndpoint add an endpoint to a LoadBalancer +func (loadBalancer *HostComputeLoadBalancer) AddEndpoint(endpoint *HostComputeEndpoint) (*HostComputeLoadBalancer, error) { + logrus.Debugf("hcn::HostComputeLoadBalancer::AddEndpoint loadBalancer=%s endpoint=%s", loadBalancer.Id, endpoint.Id) + + err := loadBalancer.Delete() + if err != nil { + return nil, err + } + + // Add Endpoint to the Existing List + loadBalancer.HostComputeEndpoints = append(loadBalancer.HostComputeEndpoints, endpoint.Id) + + return loadBalancer.Create() +} + +// RemoveEndpoint removes an endpoint from a LoadBalancer +func (loadBalancer *HostComputeLoadBalancer) RemoveEndpoint(endpoint *HostComputeEndpoint) (*HostComputeLoadBalancer, error) { + logrus.Debugf("hcn::HostComputeLoadBalancer::RemoveEndpoint loadBalancer=%s endpoint=%s", loadBalancer.Id, endpoint.Id) + + err := loadBalancer.Delete() + if err != nil { + return nil, err + } + + // Create a list of all the endpoints besides the one being removed + var endpoints []string + for _, endpointReference := range loadBalancer.HostComputeEndpoints { + if endpointReference == endpoint.Id { + continue + } + endpoints = append(endpoints, endpointReference) + } + loadBalancer.HostComputeEndpoints = endpoints + return loadBalancer.Create() +} + +// AddLoadBalancer for the specified endpoints +func AddLoadBalancer(endpoints []HostComputeEndpoint, flags LoadBalancerFlags, portMappingFlags LoadBalancerPortMappingFlags, sourceVIP string, frontendVIPs []string, protocol uint16, internalPort uint16, externalPort uint16) (*HostComputeLoadBalancer, error) { + logrus.Debugf("hcn::HostComputeLoadBalancer::AddLoadBalancer endpointId=%v, LoadBalancerFlags=%v, LoadBalancerPortMappingFlags=%v, sourceVIP=%s, frontendVIPs=%v, protocol=%v, internalPort=%v, externalPort=%v", endpoints, flags, portMappingFlags, sourceVIP, frontendVIPs, protocol, internalPort, externalPort) + + loadBalancer := &HostComputeLoadBalancer{ + SourceVIP: sourceVIP, + PortMappings: []LoadBalancerPortMapping{ + { + Protocol: uint32(protocol), + InternalPort: internalPort, + ExternalPort: externalPort, + Flags: portMappingFlags, + }, + }, + FrontendVIPs: frontendVIPs, + SchemaVersion: SchemaVersion{ + Major: 2, + Minor: 0, + }, + Flags: flags, + } + + for _, endpoint := range endpoints { + loadBalancer.HostComputeEndpoints = append(loadBalancer.HostComputeEndpoints, endpoint.Id) + } + + return loadBalancer.Create() +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go new file mode 100644 index 00000000000..d2ef2296099 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go @@ -0,0 +1,446 @@ +package hcn + +import ( + "encoding/json" + "os" + "syscall" + + "github.com/Microsoft/go-winio/pkg/guid" + icni "github.com/Microsoft/hcsshim/internal/cni" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/regstate" + "github.com/Microsoft/hcsshim/internal/runhcs" + "github.com/sirupsen/logrus" +) + +// NamespaceResourceEndpoint represents an Endpoint attached to a Namespace. +type NamespaceResourceEndpoint struct { + Id string `json:"ID,"` +} + +// NamespaceResourceContainer represents a Container attached to a Namespace. 
+type NamespaceResourceContainer struct { + Id string `json:"ID,"` +} + +// NamespaceResourceType determines whether the Namespace resource is a Container or Endpoint. +type NamespaceResourceType string + +var ( + // NamespaceResourceTypeContainer are contianers associated with a Namespace. + NamespaceResourceTypeContainer NamespaceResourceType = "Container" + // NamespaceResourceTypeEndpoint are endpoints associated with a Namespace. + NamespaceResourceTypeEndpoint NamespaceResourceType = "Endpoint" +) + +// NamespaceResource is associated with a namespace +type NamespaceResource struct { + Type NamespaceResourceType `json:","` // Container, Endpoint + Data json.RawMessage `json:","` +} + +// NamespaceType determines whether the Namespace is for a Host or Guest +type NamespaceType string + +var ( + // NamespaceTypeHost are host namespaces. + NamespaceTypeHost NamespaceType = "Host" + // NamespaceTypeHostDefault are host namespaces in the default compartment. + NamespaceTypeHostDefault NamespaceType = "HostDefault" + // NamespaceTypeGuest are guest namespaces. + NamespaceTypeGuest NamespaceType = "Guest" + // NamespaceTypeGuestDefault are guest namespaces in the default compartment. + NamespaceTypeGuestDefault NamespaceType = "GuestDefault" +) + +// HostComputeNamespace represents a namespace (AKA compartment) in +type HostComputeNamespace struct { + Id string `json:"ID,omitempty"` + NamespaceId uint32 `json:",omitempty"` + Type NamespaceType `json:",omitempty"` // Host, HostDefault, Guest, GuestDefault + Resources []NamespaceResource `json:",omitempty"` + SchemaVersion SchemaVersion `json:",omitempty"` +} + +// ModifyNamespaceSettingRequest is the structure used to send request to modify a namespace. +// Used to Add/Remove an endpoints and containers to/from a namespace. +type ModifyNamespaceSettingRequest struct { + ResourceType NamespaceResourceType `json:",omitempty"` // Container, Endpoint + RequestType RequestType `json:",omitempty"` // Add, Remove, Update, Refresh + Settings json.RawMessage `json:",omitempty"` +} + +func getNamespace(namespaceGuid guid.GUID, query string) (*HostComputeNamespace, error) { + // Open namespace. + var ( + namespaceHandle hcnNamespace + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + hr := hcnOpenNamespace(&namespaceGuid, &namespaceHandle, &resultBuffer) + if err := checkForErrors("hcnOpenNamespace", hr, resultBuffer); err != nil { + return nil, err + } + // Query namespace. + hr = hcnQueryNamespaceProperties(namespaceHandle, query, &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryNamespaceProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close namespace. 
+ hr = hcnCloseNamespace(namespaceHandle) + if err := checkForErrors("hcnCloseNamespace", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeNamespace + var outputNamespace HostComputeNamespace + if err := json.Unmarshal([]byte(properties), &outputNamespace); err != nil { + return nil, err + } + return &outputNamespace, nil +} + +func enumerateNamespaces(query string) ([]HostComputeNamespace, error) { + // Enumerate all Namespace Guids + var ( + resultBuffer *uint16 + namespaceBuffer *uint16 + ) + hr := hcnEnumerateNamespaces(query, &namespaceBuffer, &resultBuffer) + if err := checkForErrors("hcnEnumerateNamespaces", hr, resultBuffer); err != nil { + return nil, err + } + + namespaces := interop.ConvertAndFreeCoTaskMemString(namespaceBuffer) + var namespaceIds []guid.GUID + if err := json.Unmarshal([]byte(namespaces), &namespaceIds); err != nil { + return nil, err + } + + var outputNamespaces []HostComputeNamespace + for _, namespaceGuid := range namespaceIds { + namespace, err := getNamespace(namespaceGuid, query) + if err != nil { + return nil, err + } + outputNamespaces = append(outputNamespaces, *namespace) + } + return outputNamespaces, nil +} + +func createNamespace(settings string) (*HostComputeNamespace, error) { + // Create new namespace. + var ( + namespaceHandle hcnNamespace + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + namespaceGuid := guid.GUID{} + hr := hcnCreateNamespace(&namespaceGuid, settings, &namespaceHandle, &resultBuffer) + if err := checkForErrors("hcnCreateNamespace", hr, resultBuffer); err != nil { + return nil, err + } + // Query namespace. + hcnQuery := defaultQuery() + query, err := json.Marshal(hcnQuery) + if err != nil { + return nil, err + } + hr = hcnQueryNamespaceProperties(namespaceHandle, string(query), &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryNamespaceProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close namespace. + hr = hcnCloseNamespace(namespaceHandle) + if err := checkForErrors("hcnCloseNamespace", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeNamespace + var outputNamespace HostComputeNamespace + if err := json.Unmarshal([]byte(properties), &outputNamespace); err != nil { + return nil, err + } + return &outputNamespace, nil +} + +func modifyNamespace(namespaceId string, settings string) (*HostComputeNamespace, error) { + namespaceGuid, err := guid.FromString(namespaceId) + if err != nil { + return nil, errInvalidNamespaceID + } + // Open namespace. + var ( + namespaceHandle hcnNamespace + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + hr := hcnOpenNamespace(&namespaceGuid, &namespaceHandle, &resultBuffer) + if err := checkForErrors("hcnOpenNamespace", hr, resultBuffer); err != nil { + return nil, err + } + // Modify namespace. + hr = hcnModifyNamespace(namespaceHandle, settings, &resultBuffer) + if err := checkForErrors("hcnModifyNamespace", hr, resultBuffer); err != nil { + return nil, err + } + // Query namespace. + hcnQuery := defaultQuery() + query, err := json.Marshal(hcnQuery) + if err != nil { + return nil, err + } + hr = hcnQueryNamespaceProperties(namespaceHandle, string(query), &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryNamespaceProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close namespace. 
+ hr = hcnCloseNamespace(namespaceHandle) + if err := checkForErrors("hcnCloseNamespace", hr, nil); err != nil { + return nil, err + } + // Convert output to Namespace + var outputNamespace HostComputeNamespace + if err := json.Unmarshal([]byte(properties), &outputNamespace); err != nil { + return nil, err + } + return &outputNamespace, nil +} + +func deleteNamespace(namespaceId string) error { + namespaceGuid, err := guid.FromString(namespaceId) + if err != nil { + return errInvalidNamespaceID + } + var resultBuffer *uint16 + hr := hcnDeleteNamespace(&namespaceGuid, &resultBuffer) + if err := checkForErrors("hcnDeleteNamespace", hr, resultBuffer); err != nil { + return err + } + return nil +} + +// ListNamespaces makes a call to list all available namespaces. +func ListNamespaces() ([]HostComputeNamespace, error) { + hcnQuery := defaultQuery() + namespaces, err := ListNamespacesQuery(hcnQuery) + if err != nil { + return nil, err + } + return namespaces, nil +} + +// ListNamespacesQuery makes a call to query the list of available namespaces. +func ListNamespacesQuery(query HostComputeQuery) ([]HostComputeNamespace, error) { + queryJson, err := json.Marshal(query) + if err != nil { + return nil, err + } + + namespaces, err := enumerateNamespaces(string(queryJson)) + if err != nil { + return nil, err + } + return namespaces, nil +} + +// GetNamespaceByID returns the Namespace specified by Id. +func GetNamespaceByID(namespaceId string) (*HostComputeNamespace, error) { + hcnQuery := defaultQuery() + mapA := map[string]string{"ID": namespaceId} + filter, err := json.Marshal(mapA) + if err != nil { + return nil, err + } + hcnQuery.Filter = string(filter) + + namespaces, err := ListNamespacesQuery(hcnQuery) + if err != nil { + return nil, err + } + if len(namespaces) == 0 { + return nil, NamespaceNotFoundError{NamespaceID: namespaceId} + } + + return &namespaces[0], err +} + +// GetNamespaceEndpointIds returns the endpoints of the Namespace specified by Id. +func GetNamespaceEndpointIds(namespaceId string) ([]string, error) { + namespace, err := GetNamespaceByID(namespaceId) + if err != nil { + return nil, err + } + var endpointsIds []string + for _, resource := range namespace.Resources { + if resource.Type == "Endpoint" { + var endpointResource NamespaceResourceEndpoint + if err := json.Unmarshal([]byte(resource.Data), &endpointResource); err != nil { + return nil, err + } + endpointsIds = append(endpointsIds, endpointResource.Id) + } + } + return endpointsIds, nil +} + +// GetNamespaceContainerIds returns the containers of the Namespace specified by Id. +func GetNamespaceContainerIds(namespaceId string) ([]string, error) { + namespace, err := GetNamespaceByID(namespaceId) + if err != nil { + return nil, err + } + var containerIds []string + for _, resource := range namespace.Resources { + if resource.Type == "Container" { + var contaienrResource NamespaceResourceContainer + if err := json.Unmarshal([]byte(resource.Data), &contaienrResource); err != nil { + return nil, err + } + containerIds = append(containerIds, contaienrResource.Id) + } + } + return containerIds, nil +} + +// NewNamespace creates a new Namespace object +func NewNamespace(nsType NamespaceType) *HostComputeNamespace { + return &HostComputeNamespace{ + Type: nsType, + SchemaVersion: V2SchemaVersion(), + } +} + +// Create Namespace. 
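A minimal sketch of the namespace lifecycle exposed by the constructors and getters above: build a namespace with NewNamespace, create it, then read back its attached endpoint IDs (normally empty right after creation).

package hcnexamples

import "github.com/Microsoft/hcsshim/hcn"

// newHostNamespaceWithEndpoints creates a host-type namespace and returns the
// IDs of any endpoints already attached to it.
func newHostNamespaceWithEndpoints() (*hcn.HostComputeNamespace, []string, error) {
	namespace, err := hcn.NewNamespace(hcn.NamespaceTypeHost).Create()
	if err != nil {
		return nil, nil, err
	}
	endpointIDs, err := hcn.GetNamespaceEndpointIds(namespace.Id)
	if err != nil {
		return nil, nil, err
	}
	return namespace, endpointIDs, nil
}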
+func (namespace *HostComputeNamespace) Create() (*HostComputeNamespace, error) { + logrus.Debugf("hcn::HostComputeNamespace::Create id=%s", namespace.Id) + + jsonString, err := json.Marshal(namespace) + if err != nil { + return nil, err + } + + logrus.Debugf("hcn::HostComputeNamespace::Create JSON: %s", jsonString) + namespace, hcnErr := createNamespace(string(jsonString)) + if hcnErr != nil { + return nil, hcnErr + } + return namespace, nil +} + +// Delete Namespace. +func (namespace *HostComputeNamespace) Delete() error { + logrus.Debugf("hcn::HostComputeNamespace::Delete id=%s", namespace.Id) + + if err := deleteNamespace(namespace.Id); err != nil { + return err + } + return nil +} + +// Sync Namespace endpoints with the appropriate sandbox container holding the +// network namespace open. If no sandbox container is found for this namespace +// this method is determined to be a success and will not return an error in +// this case. If the sandbox container is found and a sync is initiated any +// failures will be returned via this method. +// +// This call initiates a sync between endpoints and the matching UtilityVM +// hosting those endpoints. It is safe to call for any `NamespaceType` but +// `NamespaceTypeGuest` is the only case when a sync will actually occur. For +// `NamespaceTypeHost` the process container will be automatically synchronized +// when the the endpoint is added via `AddNamespaceEndpoint`. +// +// Note: This method sync's both additions and removals of endpoints from a +// `NamespaceTypeGuest` namespace. +func (namespace *HostComputeNamespace) Sync() error { + logrus.WithField("id", namespace.Id).Debugf("hcs::HostComputeNamespace::Sync") + + // We only attempt a sync for namespace guest. + if namespace.Type != NamespaceTypeGuest { + return nil + } + + // Look in the registry for the key to map from namespace id to pod-id + cfg, err := icni.LoadPersistedNamespaceConfig(namespace.Id) + if err != nil { + if regstate.IsNotFoundError(err) { + return nil + } + return err + } + req := runhcs.VMRequest{ + ID: cfg.ContainerID, + Op: runhcs.OpSyncNamespace, + } + shimPath := runhcs.VMPipePath(cfg.HostUniqueID) + if err := runhcs.IssueVMRequest(shimPath, &req); err != nil { + // The shim is likey gone. Simply ignore the sync as if it didn't exist. + if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ERROR_FILE_NOT_FOUND { + // Remove the reg key there is no point to try again + _ = cfg.Remove() + return nil + } + f := map[string]interface{}{ + "id": namespace.Id, + "container-id": cfg.ContainerID, + } + logrus.WithFields(f). + WithError(err). + Debugf("hcs::HostComputeNamespace::Sync failed to connect to shim pipe: '%s'", shimPath) + return err + } + return nil +} + +// ModifyNamespaceSettings updates the Endpoints/Containers of a Namespace. +func ModifyNamespaceSettings(namespaceId string, request *ModifyNamespaceSettingRequest) error { + logrus.Debugf("hcn::HostComputeNamespace::ModifyNamespaceSettings id=%s", namespaceId) + + namespaceSettings, err := json.Marshal(request) + if err != nil { + return err + } + + _, err = modifyNamespace(namespaceId, string(namespaceSettings)) + if err != nil { + return err + } + return nil +} + +// AddNamespaceEndpoint adds an endpoint to a Namespace. 
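A sketch of the pattern the Sync doc comment above describes: after changing the endpoint membership of a namespace, call Sync so a hosting UtilityVM picks up the change; for anything other than NamespaceTypeGuest the call is a no-op that returns nil.

package hcnexamples

import "github.com/Microsoft/hcsshim/hcn"

// addEndpointAndSync attaches an endpoint to a namespace and then syncs the
// namespace, which only has an effect for guest namespaces.
func addEndpointAndSync(namespace *hcn.HostComputeNamespace, endpointID string) error {
	if err := hcn.AddNamespaceEndpoint(namespace.Id, endpointID); err != nil {
		return err
	}
	return namespace.Sync()
}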
+func AddNamespaceEndpoint(namespaceId string, endpointId string) error { + logrus.Debugf("hcn::HostComputeEndpoint::AddNamespaceEndpoint id=%s", endpointId) + + mapA := map[string]string{"EndpointId": endpointId} + settingsJson, err := json.Marshal(mapA) + if err != nil { + return err + } + requestMessage := &ModifyNamespaceSettingRequest{ + ResourceType: NamespaceResourceTypeEndpoint, + RequestType: RequestTypeAdd, + Settings: settingsJson, + } + + return ModifyNamespaceSettings(namespaceId, requestMessage) +} + +// RemoveNamespaceEndpoint removes an endpoint from a Namespace. +func RemoveNamespaceEndpoint(namespaceId string, endpointId string) error { + logrus.Debugf("hcn::HostComputeNamespace::RemoveNamespaceEndpoint id=%s", endpointId) + + mapA := map[string]string{"EndpointId": endpointId} + settingsJson, err := json.Marshal(mapA) + if err != nil { + return err + } + requestMessage := &ModifyNamespaceSettingRequest{ + ResourceType: NamespaceResourceTypeEndpoint, + RequestType: RequestTypeRemove, + Settings: settingsJson, + } + + return ModifyNamespaceSettings(namespaceId, requestMessage) +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go new file mode 100644 index 00000000000..c36b136387a --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go @@ -0,0 +1,462 @@ +package hcn + +import ( + "encoding/json" + "errors" + + "github.com/Microsoft/go-winio/pkg/guid" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/sirupsen/logrus" +) + +// Route is associated with a subnet. +type Route struct { + NextHop string `json:",omitempty"` + DestinationPrefix string `json:",omitempty"` + Metric uint16 `json:",omitempty"` +} + +// Subnet is associated with a Ipam. +type Subnet struct { + IpAddressPrefix string `json:",omitempty"` + Policies []json.RawMessage `json:",omitempty"` + Routes []Route `json:",omitempty"` +} + +// Ipam (Internet Protocol Address Management) is associated with a network +// and represents the address space(s) of a network. +type Ipam struct { + Type string `json:",omitempty"` // Ex: Static, DHCP + Subnets []Subnet `json:",omitempty"` +} + +// MacRange is associated with MacPool and respresents the start and end addresses. +type MacRange struct { + StartMacAddress string `json:",omitempty"` + EndMacAddress string `json:",omitempty"` +} + +// MacPool is associated with a network and represents pool of MacRanges. +type MacPool struct { + Ranges []MacRange `json:",omitempty"` +} + +// Dns (Domain Name System is associated with a network). +type Dns struct { + Domain string `json:",omitempty"` + Search []string `json:",omitempty"` + ServerList []string `json:",omitempty"` + Options []string `json:",omitempty"` +} + +// NetworkType are various networks. +type NetworkType string + +// NetworkType const +const ( + NAT NetworkType = "NAT" + Transparent NetworkType = "Transparent" + L2Bridge NetworkType = "L2Bridge" + L2Tunnel NetworkType = "L2Tunnel" + ICS NetworkType = "ICS" + Private NetworkType = "Private" + Overlay NetworkType = "Overlay" +) + +// NetworkFlags are various network flags. 
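An illustrative helper showing how Route, Subnet, and Ipam compose into a network's address configuration. HostComputeNetwork (defined just below) carries this under its Ipams field; creating the network itself uses machinery later in hcnnetwork.go that is not shown in this hunk. The prefix and gateway values are placeholders.

package hcnexamples

import "github.com/Microsoft/hcsshim/hcn"

// staticIpam builds a static address configuration: one subnet with a default
// route through the given gateway.
func staticIpam(prefix, gateway string) hcn.Ipam {
	return hcn.Ipam{
		Type: "Static",
		Subnets: []hcn.Subnet{{
			IpAddressPrefix: prefix, // e.g. "10.0.0.0/24"
			Routes: []hcn.Route{{
				NextHop:           gateway,
				DestinationPrefix: "0.0.0.0/0",
			}},
		}},
	}
}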
+// NetworkFlags are various network flags.
+type NetworkFlags uint32
+
+// NetworkFlags const
+const (
+	None                NetworkFlags = 0
+	EnableNonPersistent NetworkFlags = 8
+)
+
+// HostComputeNetwork represents a network
+type HostComputeNetwork struct {
+	Id            string          `json:"ID,omitempty"`
+	Name          string          `json:",omitempty"`
+	Type          NetworkType     `json:",omitempty"`
+	Policies      []NetworkPolicy `json:",omitempty"`
+	MacPool       MacPool         `json:",omitempty"`
+	Dns           Dns             `json:",omitempty"`
+	Ipams         []Ipam          `json:",omitempty"`
+	Flags         NetworkFlags    `json:",omitempty"` // 0: None
+	Health        Health          `json:",omitempty"`
+	SchemaVersion SchemaVersion   `json:",omitempty"`
+}
+
+// NetworkResourceType are the 3 different Network settings resources.
+type NetworkResourceType string
+
+var (
+	// NetworkResourceTypePolicy is for Network's policies. Ex: RemoteSubnet
+	NetworkResourceTypePolicy NetworkResourceType = "Policy"
+	// NetworkResourceTypeDNS is for Network's DNS settings.
+	NetworkResourceTypeDNS NetworkResourceType = "DNS"
+	// NetworkResourceTypeExtension is for Network's extension settings.
+	NetworkResourceTypeExtension NetworkResourceType = "Extension"
+)
+
+// ModifyNetworkSettingRequest is the structure used to send a request to modify a network.
+// Used to update DNS/extension/policy on a network.
+type ModifyNetworkSettingRequest struct {
+	ResourceType NetworkResourceType `json:",omitempty"` // Policy, DNS, Extension
+	RequestType  RequestType         `json:",omitempty"` // Add, Remove, Update, Refresh
+	Settings     json.RawMessage     `json:",omitempty"`
+}
+
+type PolicyNetworkRequest struct {
+	Policies []NetworkPolicy `json:",omitempty"`
+}
+
+func getNetwork(networkGuid guid.GUID, query string) (*HostComputeNetwork, error) {
+	// Open network.
+	var (
+		networkHandle    hcnNetwork
+		resultBuffer     *uint16
+		propertiesBuffer *uint16
+	)
+	hr := hcnOpenNetwork(&networkGuid, &networkHandle, &resultBuffer)
+	if err := checkForErrors("hcnOpenNetwork", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	// Query network.
+	hr = hcnQueryNetworkProperties(networkHandle, query, &propertiesBuffer, &resultBuffer)
+	if err := checkForErrors("hcnQueryNetworkProperties", hr, resultBuffer); err != nil {
+		return nil, err
+	}
+	properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer)
+	// Close network.
+	hr = hcnCloseNetwork(networkHandle)
+	if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil {
+		return nil, err
+	}
+	// Convert output to HostComputeNetwork
+	var outputNetwork HostComputeNetwork
+
+	// If HNS sets the network type to NAT (i.e. '0' in HNS.Schema.Network.NetworkMode),
+	// the value will be omitted from the JSON blob. We therefore need to initialize NAT here before
+	// unmarshaling the JSON blob.
+ outputNetwork.Type = NAT + + if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil { + return nil, err + } + return &outputNetwork, nil +} + +func enumerateNetworks(query string) ([]HostComputeNetwork, error) { + // Enumerate all Network Guids + var ( + resultBuffer *uint16 + networkBuffer *uint16 + ) + hr := hcnEnumerateNetworks(query, &networkBuffer, &resultBuffer) + if err := checkForErrors("hcnEnumerateNetworks", hr, resultBuffer); err != nil { + return nil, err + } + + networks := interop.ConvertAndFreeCoTaskMemString(networkBuffer) + var networkIds []guid.GUID + if err := json.Unmarshal([]byte(networks), &networkIds); err != nil { + return nil, err + } + + var outputNetworks []HostComputeNetwork + for _, networkGuid := range networkIds { + network, err := getNetwork(networkGuid, query) + if err != nil { + return nil, err + } + outputNetworks = append(outputNetworks, *network) + } + return outputNetworks, nil +} + +func createNetwork(settings string) (*HostComputeNetwork, error) { + // Create new network. + var ( + networkHandle hcnNetwork + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + networkGuid := guid.GUID{} + hr := hcnCreateNetwork(&networkGuid, settings, &networkHandle, &resultBuffer) + if err := checkForErrors("hcnCreateNetwork", hr, resultBuffer); err != nil { + return nil, err + } + // Query network. + hcnQuery := defaultQuery() + query, err := json.Marshal(hcnQuery) + if err != nil { + return nil, err + } + hr = hcnQueryNetworkProperties(networkHandle, string(query), &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryNetworkProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close network. + hr = hcnCloseNetwork(networkHandle) + if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeNetwork + var outputNetwork HostComputeNetwork + + // If HNS sets the network type to NAT (i.e. '0' in HNS.Schema.Network.NetworkMode), + // the value will be omitted from the JSON blob. We therefore need to initialize NAT here before + // unmarshaling the JSON blob. + outputNetwork.Type = NAT + + if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil { + return nil, err + } + return &outputNetwork, nil +} + +func modifyNetwork(networkId string, settings string) (*HostComputeNetwork, error) { + networkGuid, err := guid.FromString(networkId) + if err != nil { + return nil, errInvalidNetworkID + } + // Open Network + var ( + networkHandle hcnNetwork + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + hr := hcnOpenNetwork(&networkGuid, &networkHandle, &resultBuffer) + if err := checkForErrors("hcnOpenNetwork", hr, resultBuffer); err != nil { + return nil, err + } + // Modify Network + hr = hcnModifyNetwork(networkHandle, settings, &resultBuffer) + if err := checkForErrors("hcnModifyNetwork", hr, resultBuffer); err != nil { + return nil, err + } + // Query network. + hcnQuery := defaultQuery() + query, err := json.Marshal(hcnQuery) + if err != nil { + return nil, err + } + hr = hcnQueryNetworkProperties(networkHandle, string(query), &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryNetworkProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close network. 
+ hr = hcnCloseNetwork(networkHandle) + if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeNetwork + var outputNetwork HostComputeNetwork + + // If HNS sets the network type to NAT (i.e. '0' in HNS.Schema.Network.NetworkMode), + // the value will be omitted from the JSON blob. We therefore need to initialize NAT here before + // unmarshaling the JSON blob. + outputNetwork.Type = NAT + + if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil { + return nil, err + } + return &outputNetwork, nil +} + +func deleteNetwork(networkId string) error { + networkGuid, err := guid.FromString(networkId) + if err != nil { + return errInvalidNetworkID + } + var resultBuffer *uint16 + hr := hcnDeleteNetwork(&networkGuid, &resultBuffer) + if err := checkForErrors("hcnDeleteNetwork", hr, resultBuffer); err != nil { + return err + } + return nil +} + +// ListNetworks makes a call to list all available networks. +func ListNetworks() ([]HostComputeNetwork, error) { + hcnQuery := defaultQuery() + networks, err := ListNetworksQuery(hcnQuery) + if err != nil { + return nil, err + } + return networks, nil +} + +// ListNetworksQuery makes a call to query the list of available networks. +func ListNetworksQuery(query HostComputeQuery) ([]HostComputeNetwork, error) { + queryJson, err := json.Marshal(query) + if err != nil { + return nil, err + } + + networks, err := enumerateNetworks(string(queryJson)) + if err != nil { + return nil, err + } + return networks, nil +} + +// GetNetworkByID returns the network specified by Id. +func GetNetworkByID(networkID string) (*HostComputeNetwork, error) { + hcnQuery := defaultQuery() + mapA := map[string]string{"ID": networkID} + filter, err := json.Marshal(mapA) + if err != nil { + return nil, err + } + hcnQuery.Filter = string(filter) + + networks, err := ListNetworksQuery(hcnQuery) + if err != nil { + return nil, err + } + if len(networks) == 0 { + return nil, NetworkNotFoundError{NetworkID: networkID} + } + return &networks[0], err +} + +// GetNetworkByName returns the network specified by Name. +func GetNetworkByName(networkName string) (*HostComputeNetwork, error) { + hcnQuery := defaultQuery() + mapA := map[string]string{"Name": networkName} + filter, err := json.Marshal(mapA) + if err != nil { + return nil, err + } + hcnQuery.Filter = string(filter) + + networks, err := ListNetworksQuery(hcnQuery) + if err != nil { + return nil, err + } + if len(networks) == 0 { + return nil, NetworkNotFoundError{NetworkName: networkName} + } + return &networks[0], err +} + +// Create Network. 
+func (network *HostComputeNetwork) Create() (*HostComputeNetwork, error) { + logrus.Debugf("hcn::HostComputeNetwork::Create id=%s", network.Id) + for _, ipam := range network.Ipams { + for _, subnet := range ipam.Subnets { + if subnet.IpAddressPrefix != "" { + hasDefault := false + for _, route := range subnet.Routes { + if route.NextHop == "" { + return nil, errors.New("network create error, subnet has address prefix but no gateway specified") + } + if route.DestinationPrefix == "0.0.0.0/0" || route.DestinationPrefix == "::/0" { + hasDefault = true + } + } + if !hasDefault { + return nil, errors.New("network create error, no default gateway") + } + } + } + } + + jsonString, err := json.Marshal(network) + if err != nil { + return nil, err + } + + logrus.Debugf("hcn::HostComputeNetwork::Create JSON: %s", jsonString) + network, hcnErr := createNetwork(string(jsonString)) + if hcnErr != nil { + return nil, hcnErr + } + return network, nil +} + +// Delete Network. +func (network *HostComputeNetwork) Delete() error { + logrus.Debugf("hcn::HostComputeNetwork::Delete id=%s", network.Id) + + if err := deleteNetwork(network.Id); err != nil { + return err + } + return nil +} + +// ModifyNetworkSettings updates the Policy for a network. +func (network *HostComputeNetwork) ModifyNetworkSettings(request *ModifyNetworkSettingRequest) error { + logrus.Debugf("hcn::HostComputeNetwork::ModifyNetworkSettings id=%s", network.Id) + + networkSettingsRequest, err := json.Marshal(request) + if err != nil { + return err + } + + _, err = modifyNetwork(network.Id, string(networkSettingsRequest)) + if err != nil { + return err + } + return nil +} + +// AddPolicy applies a Policy (ex: RemoteSubnet) on the Network. +func (network *HostComputeNetwork) AddPolicy(networkPolicy PolicyNetworkRequest) error { + logrus.Debugf("hcn::HostComputeNetwork::AddPolicy id=%s", network.Id) + + settingsJson, err := json.Marshal(networkPolicy) + if err != nil { + return err + } + requestMessage := &ModifyNetworkSettingRequest{ + ResourceType: NetworkResourceTypePolicy, + RequestType: RequestTypeAdd, + Settings: settingsJson, + } + + return network.ModifyNetworkSettings(requestMessage) +} + +// RemovePolicy removes a Policy (ex: RemoteSubnet) from the Network. +func (network *HostComputeNetwork) RemovePolicy(networkPolicy PolicyNetworkRequest) error { + logrus.Debugf("hcn::HostComputeNetwork::RemovePolicy id=%s", network.Id) + + settingsJson, err := json.Marshal(networkPolicy) + if err != nil { + return err + } + requestMessage := &ModifyNetworkSettingRequest{ + ResourceType: NetworkResourceTypePolicy, + RequestType: RequestTypeRemove, + Settings: settingsJson, + } + + return network.ModifyNetworkSettings(requestMessage) +} + +// CreateEndpoint creates an endpoint on the Network. +func (network *HostComputeNetwork) CreateEndpoint(endpoint *HostComputeEndpoint) (*HostComputeEndpoint, error) { + isRemote := endpoint.Flags&EndpointFlagsRemoteEndpoint != 0 + logrus.Debugf("hcn::HostComputeNetwork::CreatEndpoint, networkId=%s remote=%t", network.Id, isRemote) + + endpoint.HostComputeNetwork = network.Id + endpointSettings, err := json.Marshal(endpoint) + if err != nil { + return nil, err + } + newEndpoint, err := createEndpoint(network.Id, string(endpointSettings)) + if err != nil { + return nil, err + } + return newEndpoint, nil +} + +// CreateRemoteEndpoint creates a remote endpoint on the Network. 
+func (network *HostComputeNetwork) CreateRemoteEndpoint(endpoint *HostComputeEndpoint) (*HostComputeEndpoint, error) { + endpoint.Flags = EndpointFlagsRemoteEndpoint | endpoint.Flags + return network.CreateEndpoint(endpoint) +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go new file mode 100644 index 00000000000..c2aa599f339 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go @@ -0,0 +1,344 @@ +package hcn + +import ( + "encoding/json" +) + +// EndpointPolicyType are the potential Policies that apply to Endpoints. +type EndpointPolicyType string + +// EndpointPolicyType const +const ( + PortMapping EndpointPolicyType = "PortMapping" + ACL EndpointPolicyType = "ACL" + QOS EndpointPolicyType = "QOS" + L2Driver EndpointPolicyType = "L2Driver" + OutBoundNAT EndpointPolicyType = "OutBoundNAT" + SDNRoute EndpointPolicyType = "SDNRoute" + L4Proxy EndpointPolicyType = "L4Proxy" + L4WFPPROXY EndpointPolicyType = "L4WFPPROXY" + PortName EndpointPolicyType = "PortName" + EncapOverhead EndpointPolicyType = "EncapOverhead" + IOV EndpointPolicyType = "Iov" + // Endpoint and Network have InterfaceConstraint and ProviderAddress + NetworkProviderAddress EndpointPolicyType = "ProviderAddress" + NetworkInterfaceConstraint EndpointPolicyType = "InterfaceConstraint" + TierAcl EndpointPolicyType = "TierAcl" +) + +// EndpointPolicy is a collection of Policy settings for an Endpoint. +type EndpointPolicy struct { + Type EndpointPolicyType `json:""` + Settings json.RawMessage `json:",omitempty"` +} + +// NetworkPolicyType are the potential Policies that apply to Networks. +type NetworkPolicyType string + +// NetworkPolicyType const +const ( + SourceMacAddress NetworkPolicyType = "SourceMacAddress" + NetAdapterName NetworkPolicyType = "NetAdapterName" + VSwitchExtension NetworkPolicyType = "VSwitchExtension" + DrMacAddress NetworkPolicyType = "DrMacAddress" + AutomaticDNS NetworkPolicyType = "AutomaticDNS" + InterfaceConstraint NetworkPolicyType = "InterfaceConstraint" + ProviderAddress NetworkPolicyType = "ProviderAddress" + RemoteSubnetRoute NetworkPolicyType = "RemoteSubnetRoute" + VxlanPort NetworkPolicyType = "VxlanPort" + HostRoute NetworkPolicyType = "HostRoute" + SetPolicy NetworkPolicyType = "SetPolicy" + NetworkL4Proxy NetworkPolicyType = "L4Proxy" + LayerConstraint NetworkPolicyType = "LayerConstraint" + NetworkACL NetworkPolicyType = "NetworkACL" +) + +// NetworkPolicy is a collection of Policy settings for a Network. +type NetworkPolicy struct { + Type NetworkPolicyType `json:""` + Settings json.RawMessage `json:",omitempty"` +} + +// SubnetPolicyType are the potential Policies that apply to Subnets. +type SubnetPolicyType string + +// SubnetPolicyType const +const ( + VLAN SubnetPolicyType = "VLAN" + VSID SubnetPolicyType = "VSID" +) + +// SubnetPolicy is a collection of Policy settings for a Subnet. +type SubnetPolicy struct { + Type SubnetPolicyType `json:""` + Settings json.RawMessage `json:",omitempty"` +} + +// NatFlags are flags for portmappings. 
+type NatFlags uint32 + +const ( + NatFlagsNone NatFlags = iota + NatFlagsLocalRoutedVip + NatFlagsIPv6 +) + +/// Endpoint Policy objects + +// PortMappingPolicySetting defines Port Mapping (NAT) +type PortMappingPolicySetting struct { + Protocol uint32 `json:",omitempty"` // EX: TCP = 6, UDP = 17 + InternalPort uint16 `json:",omitempty"` + ExternalPort uint16 `json:",omitempty"` + VIP string `json:",omitempty"` + Flags NatFlags `json:",omitempty"` +} + +// ActionType associated with ACLs. Value is either Allow or Block. +type ActionType string + +// DirectionType associated with ACLs. Value is either In or Out. +type DirectionType string + +// RuleType associated with ACLs. Value is either Host (WFP) or Switch (VFP). +type RuleType string + +const ( + // Allow traffic + ActionTypeAllow ActionType = "Allow" + // Block traffic + ActionTypeBlock ActionType = "Block" + // Pass traffic + ActionTypePass ActionType = "Pass" + + // In is traffic coming to the Endpoint + DirectionTypeIn DirectionType = "In" + // Out is traffic leaving the Endpoint + DirectionTypeOut DirectionType = "Out" + + // Host creates WFP (Windows Firewall) rules + RuleTypeHost RuleType = "Host" + // Switch creates VFP (Virtual Filter Platform) rules + RuleTypeSwitch RuleType = "Switch" +) + +// AclPolicySetting creates firewall rules on an endpoint +type AclPolicySetting struct { + Protocols string `json:",omitempty"` // EX: 6 (TCP), 17 (UDP), 1 (ICMPv4), 58 (ICMPv6), 2 (IGMP) + Action ActionType `json:","` + Direction DirectionType `json:","` + LocalAddresses string `json:",omitempty"` + RemoteAddresses string `json:",omitempty"` + LocalPorts string `json:",omitempty"` + RemotePorts string `json:",omitempty"` + RuleType RuleType `json:",omitempty"` + Priority uint16 `json:",omitempty"` +} + +// QosPolicySetting sets Quality of Service bandwidth caps on an Endpoint. +type QosPolicySetting struct { + MaximumOutgoingBandwidthInBytes uint64 +} + +// OutboundNatPolicySetting sets outbound Network Address Translation on an Endpoint. +type OutboundNatPolicySetting struct { + VirtualIP string `json:",omitempty"` + Exceptions []string `json:",omitempty"` + Destinations []string `json:",omitempty"` + Flags NatFlags `json:",omitempty"` +} + +// SDNRoutePolicySetting sets SDN Route on an Endpoint. +type SDNRoutePolicySetting struct { + DestinationPrefix string `json:",omitempty"` + NextHop string `json:",omitempty"` + NeedEncap bool `json:",omitempty"` +} + +// NetworkACLPolicySetting creates ACL rules on a network +type NetworkACLPolicySetting struct { + Protocols string `json:",omitempty"` // EX: 6 (TCP), 17 (UDP), 1 (ICMPv4), 58 (ICMPv6), 2 (IGMP) + Action ActionType `json:","` + Direction DirectionType `json:","` + LocalAddresses string `json:",omitempty"` + RemoteAddresses string `json:",omitempty"` + LocalPorts string `json:",omitempty"` + RemotePorts string `json:",omitempty"` + RuleType RuleType `json:",omitempty"` + Priority uint16 `json:",omitempty"` +} + +// FiveTuple is nested in L4ProxyPolicySetting for WFP support. 
+type FiveTuple struct {
+	Protocols       string `json:",omitempty"`
+	LocalAddresses  string `json:",omitempty"`
+	RemoteAddresses string `json:",omitempty"`
+	LocalPorts      string `json:",omitempty"`
+	RemotePorts     string `json:",omitempty"`
+	Priority        uint16 `json:",omitempty"`
+}
+
+// ProxyExceptions exempts traffic to IpAddresses and Ports
+type ProxyExceptions struct {
+	IpAddressExceptions []string `json:",omitempty"`
+	PortExceptions      []string `json:",omitempty"`
+}
+
+// L4WfpProxyPolicySetting sets Layer-4 Proxy on an endpoint.
+type L4WfpProxyPolicySetting struct {
+	InboundProxyPort   string          `json:",omitempty"`
+	OutboundProxyPort  string          `json:",omitempty"`
+	FilterTuple        FiveTuple       `json:",omitempty"`
+	UserSID            string          `json:",omitempty"`
+	InboundExceptions  ProxyExceptions `json:",omitempty"`
+	OutboundExceptions ProxyExceptions `json:",omitempty"`
+}
+
+// PortnameEndpointPolicySetting sets the port name for an endpoint.
+type PortnameEndpointPolicySetting struct {
+	Name string `json:",omitempty"`
+}
+
+// EncapOverheadEndpointPolicySetting sets the encap overhead for an endpoint.
+type EncapOverheadEndpointPolicySetting struct {
+	Overhead uint16 `json:",omitempty"`
+}
+
+// IovPolicySetting sets the Iov settings for an endpoint.
+type IovPolicySetting struct {
+	IovOffloadWeight    uint32 `json:",omitempty"`
+	QueuePairsRequested uint32 `json:",omitempty"`
+	InterruptModeration uint32 `json:",omitempty"`
+}
+
+/// Endpoint and Network Policy objects
+
+// ProviderAddressEndpointPolicySetting sets the PA for an endpoint.
+type ProviderAddressEndpointPolicySetting struct {
+	ProviderAddress string `json:",omitempty"`
+}
+
+// InterfaceConstraintPolicySetting limits an Endpoint or Network to a specific Nic.
+type InterfaceConstraintPolicySetting struct {
+	InterfaceGuid        string `json:",omitempty"`
+	InterfaceLuid        uint64 `json:",omitempty"`
+	InterfaceIndex       uint32 `json:",omitempty"`
+	InterfaceMediaType   uint32 `json:",omitempty"`
+	InterfaceAlias       string `json:",omitempty"`
+	InterfaceDescription string `json:",omitempty"`
+}
+
+/// Network Policy objects
+
+// SourceMacAddressNetworkPolicySetting sets source MAC for a network.
+type SourceMacAddressNetworkPolicySetting struct {
+	SourceMacAddress string `json:",omitempty"`
+}
+
+// NetAdapterNameNetworkPolicySetting sets network adapter of a network.
+type NetAdapterNameNetworkPolicySetting struct {
+	NetworkAdapterName string `json:",omitempty"`
+}
+
+// VSwitchExtensionNetworkPolicySetting enables/disables VSwitch extensions for a network.
+type VSwitchExtensionNetworkPolicySetting struct {
+	ExtensionID string `json:",omitempty"`
+	Enable      bool   `json:",omitempty"`
+}
+
+// DrMacAddressNetworkPolicySetting sets the DR MAC for a network.
+type DrMacAddressNetworkPolicySetting struct {
+	Address string `json:",omitempty"`
+}
+
+// AutomaticDNSNetworkPolicySetting enables/disables automatic DNS on a network.
+type AutomaticDNSNetworkPolicySetting struct {
+	Enable bool `json:",omitempty"`
+}
+
+type LayerConstraintNetworkPolicySetting struct {
+	LayerId string `json:",omitempty"`
+}
+
+/// Subnet Policy objects
+
+// VlanPolicySetting isolates a subnet with VLAN tagging.
+type VlanPolicySetting struct {
+	IsolationId uint32 `json:","`
+}
+
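Since every policy here follows the same Type-plus-raw-Settings shape, a short sketch of how one of these settings is typically wrapped into an EndpointPolicy (illustrative only; the setting struct is marshaled to JSON and carried in the Settings field):

import (
	"encoding/json"

	"github.com/Microsoft/hcsshim/hcn"
)

// portMappingPolicy wraps a PortMappingPolicySetting into an EndpointPolicy.
func portMappingPolicy(internal, external uint16) (hcn.EndpointPolicy, error) {
	setting := hcn.PortMappingPolicySetting{
		Protocol:     6, // TCP, per the comment on the struct
		InternalPort: internal,
		ExternalPort: external,
	}
	raw, err := json.Marshal(setting)
	if err != nil {
		return hcn.EndpointPolicy{}, err
	}
	return hcn.EndpointPolicy{
		Type:     hcn.PortMapping,
		Settings: raw,
	}, nil
}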
+// VsidPolicySetting isolates a subnet with VSID tagging.
+type VsidPolicySetting struct {
+	IsolationId uint32 `json:","`
+}
+
+// RemoteSubnetRoutePolicySetting creates remote subnet route rules on a network
+type RemoteSubnetRoutePolicySetting struct {
+	DestinationPrefix           string
+	IsolationId                 uint16
+	ProviderAddress             string
+	DistributedRouterMacAddress string
+}
+
+// SetPolicyTypes associated with SetPolicy. Value is IPSET.
+type SetPolicyType string
+
+const (
+	SetPolicyTypeIpSet       SetPolicyType = "IPSET"
+	SetPolicyTypeNestedIpSet SetPolicyType = "NESTEDIPSET"
+)
+
+// SetPolicySetting creates IPSets on a network
+type SetPolicySetting struct {
+	Id     string
+	Name   string
+	Type   SetPolicyType
+	Values string
+}
+
+// VxlanPortPolicySetting allows configuring the VXLAN port
+type VxlanPortPolicySetting struct {
+	Port uint16
+}
+
+// ProtocolType associated with L4ProxyPolicy
+type ProtocolType uint32
+
+const (
+	ProtocolTypeUnknown ProtocolType = 0
+	ProtocolTypeICMPv4  ProtocolType = 1
+	ProtocolTypeIGMP    ProtocolType = 2
+	ProtocolTypeTCP     ProtocolType = 6
+	ProtocolTypeUDP     ProtocolType = 17
+	ProtocolTypeICMPv6  ProtocolType = 58
+)
+
+// L4ProxyPolicySetting applies proxy policy on network/endpoint
+type L4ProxyPolicySetting struct {
+	IP          string       `json:",omitempty"`
+	Port        string       `json:",omitempty"`
+	Protocol    ProtocolType `json:",omitempty"`
+	Exceptions  []string     `json:",omitempty"`
+	Destination string
+	OutboundNAT bool `json:",omitempty"`
+}
+
+// TierAclRule represents an ACL within TierAclPolicySetting
+type TierAclRule struct {
+	Id                string     `json:",omitempty"`
+	Protocols         string     `json:",omitempty"`
+	TierAclRuleAction ActionType `json:","`
+	LocalAddresses    string     `json:",omitempty"`
+	RemoteAddresses   string     `json:",omitempty"`
+	LocalPorts        string     `json:",omitempty"`
+	RemotePorts       string     `json:",omitempty"`
+	Priority          uint16     `json:",omitempty"`
+}
+
+// TierAclPolicySetting represents a Tier containing ACLs
+type TierAclPolicySetting struct {
+	Name         string        `json:","`
+	Direction    DirectionType `json:","`
+	Order        uint16        `json:""`
+	TierAclRules []TierAclRule `json:",omitempty"`
+}
diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go
new file mode 100644
index 00000000000..52e24984624
--- /dev/null
+++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go
@@ -0,0 +1,266 @@
+package hcn
+
+import (
+	"encoding/json"
+	"errors"
+
+	"github.com/Microsoft/go-winio/pkg/guid"
+	"github.com/Microsoft/hcsshim/internal/interop"
+	"github.com/sirupsen/logrus"
+)
+
+// HostComputeRoute represents SDN routes.
+type HostComputeRoute struct {
+	ID                   string                  `json:"ID,omitempty"`
+	HostComputeEndpoints []string                `json:",omitempty"`
+	Setting              []SDNRoutePolicySetting `json:",omitempty"`
+	SchemaVersion        SchemaVersion           `json:",omitempty"`
+}
+
+// ListRoutes makes a call to list all available routes.
+func ListRoutes() ([]HostComputeRoute, error) {
+	hcnQuery := defaultQuery()
+	routes, err := ListRoutesQuery(hcnQuery)
+	if err != nil {
+		return nil, err
+	}
+	return routes, nil
+}
+
+// ListRoutesQuery makes a call to query the list of available routes.
+func ListRoutesQuery(query HostComputeQuery) ([]HostComputeRoute, error) {
+	queryJSON, err := json.Marshal(query)
+	if err != nil {
+		return nil, err
+	}
+
+	routes, err := enumerateRoutes(string(queryJSON))
+	if err != nil {
+		return nil, err
+	}
+	return routes, nil
+}
+
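For how these route types are normally exercised, a hedged sketch using the AddRoute helper defined later in this file (illustrative prefix and next-hop values; endpoints are the HostComputeEndpoint values the route should apply to):

import "github.com/Microsoft/hcsshim/hcn"

// addEncapRoute asks HCN for an encapsulated SDN route covering 10.0.0.0/16.
func addEncapRoute(endpoints []hcn.HostComputeEndpoint) (*hcn.HostComputeRoute, error) {
	// AddRoute builds a V2 HostComputeRoute with a single SDNRoutePolicySetting
	// and associates it with the given endpoints.
	return hcn.AddRoute(endpoints, "10.0.0.0/16", "10.0.0.1", true)
}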
+// GetRouteByID returns the route specified by Id.
+func GetRouteByID(routeID string) (*HostComputeRoute, error) {
+	hcnQuery := defaultQuery()
+	mapA := map[string]string{"ID": routeID}
+	filter, err := json.Marshal(mapA)
+	if err != nil {
+		return nil, err
+	}
+	hcnQuery.Filter = string(filter)
+
+	routes, err := ListRoutesQuery(hcnQuery)
+	if err != nil {
+		return nil, err
+	}
+	if len(routes) == 0 {
+		return nil, RouteNotFoundError{RouteId: routeID}
+	}
+	return &routes[0], err
+}
+
+// Create Route.
+func (route *HostComputeRoute) Create() (*HostComputeRoute, error) {
+	logrus.Debugf("hcn::HostComputeRoute::Create id=%s", route.ID)
+
+	jsonString, err := json.Marshal(route)
+	if err != nil {
+		return nil, err
+	}
+
+	logrus.Debugf("hcn::HostComputeRoute::Create JSON: %s", jsonString)
+	route, hcnErr := createRoute(string(jsonString))
+	if hcnErr != nil {
+		return nil, hcnErr
+	}
+	return route, nil
+}
+
+// Delete Route.
+func (route *HostComputeRoute) Delete() error {
+	logrus.Debugf("hcn::HostComputeRoute::Delete id=%s", route.ID)
+
+	existingRoute, _ := GetRouteByID(route.ID)
+
+	if existingRoute != nil {
+		if err := deleteRoute(route.ID); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// AddEndpoint adds an endpoint to a route.
+// Since HCNRoute doesn't implement modify functionality, the add operation is essentially delete and add.
+func (route *HostComputeRoute) AddEndpoint(endpoint *HostComputeEndpoint) (*HostComputeRoute, error) {
+	logrus.Debugf("hcn::HostComputeRoute::AddEndpoint route=%s endpoint=%s", route.ID, endpoint.Id)
+
+	err := route.Delete()
+	if err != nil {
+		return nil, err
+	}
+
+	// Add Endpoint to the Existing List
+	route.HostComputeEndpoints = append(route.HostComputeEndpoints, endpoint.Id)
+
+	return route.Create()
+}
+
+// RemoveEndpoint removes an endpoint from a route.
+// Since HCNRoute doesn't implement modify functionality, the remove operation is essentially delete and add.
+func (route *HostComputeRoute) RemoveEndpoint(endpoint *HostComputeEndpoint) (*HostComputeRoute, error) {
+	logrus.Debugf("hcn::HostComputeRoute::RemoveEndpoint route=%s endpoint=%s", route.ID, endpoint.Id)
+
+	err := route.Delete()
+	if err != nil {
+		return nil, err
+	}
+
+	// Create a list of all the endpoints besides the one being removed
+	i := 0
+	for index, endpointReference := range route.HostComputeEndpoints {
+		if endpointReference == endpoint.Id {
+			i = index
+			break
+		}
+	}
+
+	route.HostComputeEndpoints = append(route.HostComputeEndpoints[0:i], route.HostComputeEndpoints[i+1:]...)
+ return route.Create() +} + +// AddRoute for the specified endpoints and SDN Route setting +func AddRoute(endpoints []HostComputeEndpoint, destinationPrefix string, nextHop string, needEncapsulation bool) (*HostComputeRoute, error) { + logrus.Debugf("hcn::HostComputeRoute::AddRoute endpointId=%v, destinationPrefix=%v, nextHop=%v, needEncapsulation=%v", endpoints, destinationPrefix, nextHop, needEncapsulation) + + if len(endpoints) <= 0 { + return nil, errors.New("missing endpoints") + } + + route := &HostComputeRoute{ + SchemaVersion: V2SchemaVersion(), + Setting: []SDNRoutePolicySetting{ + { + DestinationPrefix: destinationPrefix, + NextHop: nextHop, + NeedEncap: needEncapsulation, + }, + }, + } + + for _, endpoint := range endpoints { + route.HostComputeEndpoints = append(route.HostComputeEndpoints, endpoint.Id) + } + + return route.Create() +} + +func enumerateRoutes(query string) ([]HostComputeRoute, error) { + // Enumerate all routes Guids + var ( + resultBuffer *uint16 + routeBuffer *uint16 + ) + hr := hcnEnumerateRoutes(query, &routeBuffer, &resultBuffer) + if err := checkForErrors("hcnEnumerateRoutes", hr, resultBuffer); err != nil { + return nil, err + } + + routes := interop.ConvertAndFreeCoTaskMemString(routeBuffer) + var routeIds []guid.GUID + if err := json.Unmarshal([]byte(routes), &routeIds); err != nil { + return nil, err + } + + var outputRoutes []HostComputeRoute + for _, routeGUID := range routeIds { + route, err := getRoute(routeGUID, query) + if err != nil { + return nil, err + } + outputRoutes = append(outputRoutes, *route) + } + return outputRoutes, nil +} + +func getRoute(routeGUID guid.GUID, query string) (*HostComputeRoute, error) { + // Open routes. + var ( + routeHandle hcnRoute + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + hr := hcnOpenRoute(&routeGUID, &routeHandle, &resultBuffer) + if err := checkForErrors("hcnOpenRoute", hr, resultBuffer); err != nil { + return nil, err + } + // Query routes. + hr = hcnQueryRouteProperties(routeHandle, query, &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryRouteProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close routes. + hr = hcnCloseRoute(routeHandle) + if err := checkForErrors("hcnCloseRoute", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeRoute + var outputRoute HostComputeRoute + if err := json.Unmarshal([]byte(properties), &outputRoute); err != nil { + return nil, err + } + return &outputRoute, nil +} + +func createRoute(settings string) (*HostComputeRoute, error) { + // Create new route. + var ( + routeHandle hcnRoute + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + routeGUID := guid.GUID{} + hr := hcnCreateRoute(&routeGUID, settings, &routeHandle, &resultBuffer) + if err := checkForErrors("hcnCreateRoute", hr, resultBuffer); err != nil { + return nil, err + } + // Query route. + hcnQuery := defaultQuery() + query, err := json.Marshal(hcnQuery) + if err != nil { + return nil, err + } + hr = hcnQueryRouteProperties(routeHandle, string(query), &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryRouteProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close Route. 
+ hr = hcnCloseRoute(routeHandle) + if err := checkForErrors("hcnCloseRoute", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeRoute + var outputRoute HostComputeRoute + if err := json.Unmarshal([]byte(properties), &outputRoute); err != nil { + return nil, err + } + return &outputRoute, nil +} + +func deleteRoute(routeID string) error { + routeGUID, err := guid.FromString(routeID) + if err != nil { + return errInvalidRouteID + } + var resultBuffer *uint16 + hr := hcnDeleteRoute(&routeGUID, &resultBuffer) + if err := checkForErrors("hcnDeleteRoute", hr, resultBuffer); err != nil { + return err + } + return nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go new file mode 100644 index 00000000000..bacb91fedae --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go @@ -0,0 +1,147 @@ +package hcn + +import ( + "fmt" + "sync" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var ( + // featuresOnce handles assigning the supported features and printing the supported info to stdout only once to avoid unnecessary work + // multiple times. + featuresOnce sync.Once + featuresErr error + supportedFeatures SupportedFeatures +) + +// SupportedFeatures are the features provided by the Service. +type SupportedFeatures struct { + Acl AclFeatures `json:"ACL"` + Api ApiSupport `json:"API"` + RemoteSubnet bool `json:"RemoteSubnet"` + HostRoute bool `json:"HostRoute"` + DSR bool `json:"DSR"` + Slash32EndpointPrefixes bool `json:"Slash32EndpointPrefixes"` + AclSupportForProtocol252 bool `json:"AclSupportForProtocol252"` + SessionAffinity bool `json:"SessionAffinity"` + IPv6DualStack bool `json:"IPv6DualStack"` + SetPolicy bool `json:"SetPolicy"` + VxlanPort bool `json:"VxlanPort"` + L4Proxy bool `json:"L4Proxy"` // network policy that applies VFP rules to all endpoints on the network to redirect traffic + L4WfpProxy bool `json:"L4WfpProxy"` // endpoint policy that applies WFP filters to redirect traffic to/from that endpoint + TierAcl bool `json:"TierAcl"` + NetworkACL bool `json:"NetworkACL"` + NestedIpSet bool `json:"NestedIpSet"` +} + +// AclFeatures are the supported ACL possibilities. +type AclFeatures struct { + AclAddressLists bool `json:"AclAddressLists"` + AclNoHostRulePriority bool `json:"AclHostRulePriority"` + AclPortRanges bool `json:"AclPortRanges"` + AclRuleId bool `json:"AclRuleId"` +} + +// ApiSupport lists the supported API versions. +type ApiSupport struct { + V1 bool `json:"V1"` + V2 bool `json:"V2"` +} + +// GetCachedSupportedFeatures returns the features supported by the Service and an error if the query failed. If this has been called +// before it will return the supported features and error received from the first call. This can be used to optimize if many calls to the +// various hcn.IsXSupported methods need to be made. +func GetCachedSupportedFeatures() (SupportedFeatures, error) { + // Only query the HCN version and features supported once, instead of everytime this is invoked. The logs are useful to + // debug incidents where there's confusion on if a feature is supported on the host machine. The sync.Once helps to avoid redundant + // spam of these anytime a check needs to be made for if an HCN feature is supported. This is a common occurrence in kube-proxy + // for example. 
+ featuresOnce.Do(func() { + supportedFeatures, featuresErr = getSupportedFeatures() + }) + + return supportedFeatures, featuresErr +} + +// GetSupportedFeatures returns the features supported by the Service. +// +// Deprecated: Use GetCachedSupportedFeatures instead. +func GetSupportedFeatures() SupportedFeatures { + features, err := GetCachedSupportedFeatures() + if err != nil { + // Expected on pre-1803 builds, all features will be false/unsupported + logrus.WithError(err).Errorf("unable to obtain supported features") + return features + } + return features +} + +func getSupportedFeatures() (SupportedFeatures, error) { + var features SupportedFeatures + globals, err := GetGlobals() + if err != nil { + // It's expected if this fails once, it should always fail. It should fail on pre 1803 builds for example. + return SupportedFeatures{}, errors.Wrap(err, "failed to query HCN version number: this is expected on pre 1803 builds.") + } + features.Acl = AclFeatures{ + AclAddressLists: isFeatureSupported(globals.Version, HNSVersion1803), + AclNoHostRulePriority: isFeatureSupported(globals.Version, HNSVersion1803), + AclPortRanges: isFeatureSupported(globals.Version, HNSVersion1803), + AclRuleId: isFeatureSupported(globals.Version, HNSVersion1803), + } + + features.Api = ApiSupport{ + V2: isFeatureSupported(globals.Version, V2ApiSupport), + V1: true, // HNSCall is still available. + } + + features.RemoteSubnet = isFeatureSupported(globals.Version, RemoteSubnetVersion) + features.HostRoute = isFeatureSupported(globals.Version, HostRouteVersion) + features.DSR = isFeatureSupported(globals.Version, DSRVersion) + features.Slash32EndpointPrefixes = isFeatureSupported(globals.Version, Slash32EndpointPrefixesVersion) + features.AclSupportForProtocol252 = isFeatureSupported(globals.Version, AclSupportForProtocol252Version) + features.SessionAffinity = isFeatureSupported(globals.Version, SessionAffinityVersion) + features.IPv6DualStack = isFeatureSupported(globals.Version, IPv6DualStackVersion) + features.SetPolicy = isFeatureSupported(globals.Version, SetPolicyVersion) + features.VxlanPort = isFeatureSupported(globals.Version, VxlanPortVersion) + features.L4Proxy = isFeatureSupported(globals.Version, L4ProxyPolicyVersion) + features.L4WfpProxy = isFeatureSupported(globals.Version, L4WfpProxyPolicyVersion) + features.TierAcl = isFeatureSupported(globals.Version, TierAclPolicyVersion) + features.NetworkACL = isFeatureSupported(globals.Version, NetworkACLPolicyVersion) + features.NestedIpSet = isFeatureSupported(globals.Version, NestedIpSetVersion) + + logrus.WithFields(logrus.Fields{ + "version": fmt.Sprintf("%+v", globals.Version), + "supportedFeatures": fmt.Sprintf("%+v", features), + }).Info("HCN feature check") + + return features, nil +} + +func isFeatureSupported(currentVersion Version, versionsSupported VersionRanges) bool { + isFeatureSupported := false + + for _, versionRange := range versionsSupported { + isFeatureSupported = isFeatureSupported || isFeatureInRange(currentVersion, versionRange) + } + + return isFeatureSupported +} + +func isFeatureInRange(currentVersion Version, versionRange VersionRange) bool { + if currentVersion.Major < versionRange.MinVersion.Major { + return false + } + if currentVersion.Major > versionRange.MaxVersion.Major { + return false + } + if currentVersion.Major == versionRange.MinVersion.Major && currentVersion.Minor < versionRange.MinVersion.Minor { + return false + } + if currentVersion.Major == versionRange.MaxVersion.Major && currentVersion.Minor > 
versionRange.MaxVersion.Minor { + return false + } + return true +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go new file mode 100644 index 00000000000..7ec5b58b66a --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go @@ -0,0 +1,795 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package hcn + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") + modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") + modcomputenetwork = windows.NewLazySystemDLL("computenetwork.dll") + + procSetCurrentThreadCompartmentId = modiphlpapi.NewProc("SetCurrentThreadCompartmentId") + procHNSCall = modvmcompute.NewProc("HNSCall") + procHcnEnumerateNetworks = modcomputenetwork.NewProc("HcnEnumerateNetworks") + procHcnCreateNetwork = modcomputenetwork.NewProc("HcnCreateNetwork") + procHcnOpenNetwork = modcomputenetwork.NewProc("HcnOpenNetwork") + procHcnModifyNetwork = modcomputenetwork.NewProc("HcnModifyNetwork") + procHcnQueryNetworkProperties = modcomputenetwork.NewProc("HcnQueryNetworkProperties") + procHcnDeleteNetwork = modcomputenetwork.NewProc("HcnDeleteNetwork") + procHcnCloseNetwork = modcomputenetwork.NewProc("HcnCloseNetwork") + procHcnEnumerateEndpoints = modcomputenetwork.NewProc("HcnEnumerateEndpoints") + procHcnCreateEndpoint = modcomputenetwork.NewProc("HcnCreateEndpoint") + procHcnOpenEndpoint = modcomputenetwork.NewProc("HcnOpenEndpoint") + procHcnModifyEndpoint = modcomputenetwork.NewProc("HcnModifyEndpoint") + procHcnQueryEndpointProperties = modcomputenetwork.NewProc("HcnQueryEndpointProperties") + procHcnDeleteEndpoint = modcomputenetwork.NewProc("HcnDeleteEndpoint") + procHcnCloseEndpoint = modcomputenetwork.NewProc("HcnCloseEndpoint") + procHcnEnumerateNamespaces = modcomputenetwork.NewProc("HcnEnumerateNamespaces") + procHcnCreateNamespace = modcomputenetwork.NewProc("HcnCreateNamespace") + procHcnOpenNamespace = modcomputenetwork.NewProc("HcnOpenNamespace") + procHcnModifyNamespace = modcomputenetwork.NewProc("HcnModifyNamespace") + procHcnQueryNamespaceProperties = modcomputenetwork.NewProc("HcnQueryNamespaceProperties") + procHcnDeleteNamespace = modcomputenetwork.NewProc("HcnDeleteNamespace") + procHcnCloseNamespace = modcomputenetwork.NewProc("HcnCloseNamespace") + procHcnEnumerateLoadBalancers = modcomputenetwork.NewProc("HcnEnumerateLoadBalancers") + procHcnCreateLoadBalancer = modcomputenetwork.NewProc("HcnCreateLoadBalancer") + procHcnOpenLoadBalancer = modcomputenetwork.NewProc("HcnOpenLoadBalancer") + procHcnModifyLoadBalancer = modcomputenetwork.NewProc("HcnModifyLoadBalancer") + procHcnQueryLoadBalancerProperties = modcomputenetwork.NewProc("HcnQueryLoadBalancerProperties") + procHcnDeleteLoadBalancer = modcomputenetwork.NewProc("HcnDeleteLoadBalancer") + 
procHcnCloseLoadBalancer = modcomputenetwork.NewProc("HcnCloseLoadBalancer") + procHcnEnumerateSdnRoutes = modcomputenetwork.NewProc("HcnEnumerateSdnRoutes") + procHcnCreateSdnRoute = modcomputenetwork.NewProc("HcnCreateSdnRoute") + procHcnOpenSdnRoute = modcomputenetwork.NewProc("HcnOpenSdnRoute") + procHcnModifySdnRoute = modcomputenetwork.NewProc("HcnModifySdnRoute") + procHcnQuerySdnRouteProperties = modcomputenetwork.NewProc("HcnQuerySdnRouteProperties") + procHcnDeleteSdnRoute = modcomputenetwork.NewProc("HcnDeleteSdnRoute") + procHcnCloseSdnRoute = modcomputenetwork.NewProc("HcnCloseSdnRoute") +) + +func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) { + r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func _hnsCall(method string, path string, object string, response **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(method) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(object) + if hr != nil { + return + } + return __hnsCall(_p0, _p1, _p2, response) +} + +func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) (hr error) { + if hr = procHNSCall.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnEnumerateNetworks(query string, networks **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnEnumerateNetworks(_p0, networks, result) +} + +func _hcnEnumerateNetworks(query *uint16, networks **uint16, result **uint16) (hr error) { + if hr = procHcnEnumerateNetworks.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnEnumerateNetworks.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(networks)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCreateNetwork(id *_guid, settings string, network *hcnNetwork, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnCreateNetwork(id, _p0, network, result) +} + +func _hcnCreateNetwork(id *_guid, settings *uint16, network *hcnNetwork, result **uint16) (hr error) { + if hr = procHcnCreateNetwork.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnCreateNetwork.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(network)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnOpenNetwork(id *_guid, network *hcnNetwork, result **uint16) (hr error) { + if hr = procHcnOpenNetwork.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnOpenNetwork.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(network)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if 
r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnModifyNetwork(network hcnNetwork, settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnModifyNetwork(network, _p0, result) +} + +func _hcnModifyNetwork(network hcnNetwork, settings *uint16, result **uint16) (hr error) { + if hr = procHcnModifyNetwork.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnModifyNetwork.Addr(), 3, uintptr(network), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnQueryNetworkProperties(network hcnNetwork, query string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnQueryNetworkProperties(network, _p0, properties, result) +} + +func _hcnQueryNetworkProperties(network hcnNetwork, query *uint16, properties **uint16, result **uint16) (hr error) { + if hr = procHcnQueryNetworkProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnQueryNetworkProperties.Addr(), 4, uintptr(network), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnDeleteNetwork(id *_guid, result **uint16) (hr error) { + if hr = procHcnDeleteNetwork.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnDeleteNetwork.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCloseNetwork(network hcnNetwork) (hr error) { + if hr = procHcnCloseNetwork.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnCloseNetwork.Addr(), 1, uintptr(network), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnEnumerateEndpoints(query string, endpoints **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnEnumerateEndpoints(_p0, endpoints, result) +} + +func _hcnEnumerateEndpoints(query *uint16, endpoints **uint16, result **uint16) (hr error) { + if hr = procHcnEnumerateEndpoints.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnEnumerateEndpoints.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(endpoints)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCreateEndpoint(network hcnNetwork, id *_guid, settings string, endpoint *hcnEndpoint, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnCreateEndpoint(network, id, _p0, endpoint, result) +} + +func _hcnCreateEndpoint(network hcnNetwork, id *_guid, settings *uint16, endpoint *hcnEndpoint, result **uint16) (hr error) { + if hr = procHcnCreateEndpoint.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnCreateEndpoint.Addr(), 5, uintptr(network), uintptr(unsafe.Pointer(id)), 
uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(endpoint)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnOpenEndpoint(id *_guid, endpoint *hcnEndpoint, result **uint16) (hr error) { + if hr = procHcnOpenEndpoint.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnOpenEndpoint.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(endpoint)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnModifyEndpoint(endpoint hcnEndpoint, settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnModifyEndpoint(endpoint, _p0, result) +} + +func _hcnModifyEndpoint(endpoint hcnEndpoint, settings *uint16, result **uint16) (hr error) { + if hr = procHcnModifyEndpoint.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnModifyEndpoint.Addr(), 3, uintptr(endpoint), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnQueryEndpointProperties(endpoint hcnEndpoint, query string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnQueryEndpointProperties(endpoint, _p0, properties, result) +} + +func _hcnQueryEndpointProperties(endpoint hcnEndpoint, query *uint16, properties **uint16, result **uint16) (hr error) { + if hr = procHcnQueryEndpointProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnQueryEndpointProperties.Addr(), 4, uintptr(endpoint), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnDeleteEndpoint(id *_guid, result **uint16) (hr error) { + if hr = procHcnDeleteEndpoint.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnDeleteEndpoint.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCloseEndpoint(endpoint hcnEndpoint) (hr error) { + if hr = procHcnCloseEndpoint.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnCloseEndpoint.Addr(), 1, uintptr(endpoint), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnEnumerateNamespaces(query string, namespaces **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnEnumerateNamespaces(_p0, namespaces, result) +} + +func _hcnEnumerateNamespaces(query *uint16, namespaces **uint16, result **uint16) (hr error) { + if hr = procHcnEnumerateNamespaces.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnEnumerateNamespaces.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(namespaces)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func 
hcnCreateNamespace(id *_guid, settings string, namespace *hcnNamespace, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnCreateNamespace(id, _p0, namespace, result) +} + +func _hcnCreateNamespace(id *_guid, settings *uint16, namespace *hcnNamespace, result **uint16) (hr error) { + if hr = procHcnCreateNamespace.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnCreateNamespace.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(namespace)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnOpenNamespace(id *_guid, namespace *hcnNamespace, result **uint16) (hr error) { + if hr = procHcnOpenNamespace.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnOpenNamespace.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(namespace)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnModifyNamespace(namespace hcnNamespace, settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnModifyNamespace(namespace, _p0, result) +} + +func _hcnModifyNamespace(namespace hcnNamespace, settings *uint16, result **uint16) (hr error) { + if hr = procHcnModifyNamespace.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnModifyNamespace.Addr(), 3, uintptr(namespace), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnQueryNamespaceProperties(namespace hcnNamespace, query string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnQueryNamespaceProperties(namespace, _p0, properties, result) +} + +func _hcnQueryNamespaceProperties(namespace hcnNamespace, query *uint16, properties **uint16, result **uint16) (hr error) { + if hr = procHcnQueryNamespaceProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnQueryNamespaceProperties.Addr(), 4, uintptr(namespace), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnDeleteNamespace(id *_guid, result **uint16) (hr error) { + if hr = procHcnDeleteNamespace.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnDeleteNamespace.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCloseNamespace(namespace hcnNamespace) (hr error) { + if hr = procHcnCloseNamespace.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnCloseNamespace.Addr(), 1, uintptr(namespace), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnEnumerateLoadBalancers(query string, loadBalancers **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = 
syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnEnumerateLoadBalancers(_p0, loadBalancers, result) +} + +func _hcnEnumerateLoadBalancers(query *uint16, loadBalancers **uint16, result **uint16) (hr error) { + if hr = procHcnEnumerateLoadBalancers.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnEnumerateLoadBalancers.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(loadBalancers)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCreateLoadBalancer(id *_guid, settings string, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnCreateLoadBalancer(id, _p0, loadBalancer, result) +} + +func _hcnCreateLoadBalancer(id *_guid, settings *uint16, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) { + if hr = procHcnCreateLoadBalancer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnCreateLoadBalancer.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(loadBalancer)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnOpenLoadBalancer(id *_guid, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) { + if hr = procHcnOpenLoadBalancer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnOpenLoadBalancer.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(loadBalancer)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnModifyLoadBalancer(loadBalancer hcnLoadBalancer, settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnModifyLoadBalancer(loadBalancer, _p0, result) +} + +func _hcnModifyLoadBalancer(loadBalancer hcnLoadBalancer, settings *uint16, result **uint16) (hr error) { + if hr = procHcnModifyLoadBalancer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnModifyLoadBalancer.Addr(), 3, uintptr(loadBalancer), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnQueryLoadBalancerProperties(loadBalancer hcnLoadBalancer, query string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnQueryLoadBalancerProperties(loadBalancer, _p0, properties, result) +} + +func _hcnQueryLoadBalancerProperties(loadBalancer hcnLoadBalancer, query *uint16, properties **uint16, result **uint16) (hr error) { + if hr = procHcnQueryLoadBalancerProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnQueryLoadBalancerProperties.Addr(), 4, uintptr(loadBalancer), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnDeleteLoadBalancer(id *_guid, result **uint16) (hr error) { + if hr = procHcnDeleteLoadBalancer.Find(); hr != nil { + return + } 
+ r0, _, _ := syscall.Syscall(procHcnDeleteLoadBalancer.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCloseLoadBalancer(loadBalancer hcnLoadBalancer) (hr error) { + if hr = procHcnCloseLoadBalancer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnCloseLoadBalancer.Addr(), 1, uintptr(loadBalancer), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnEnumerateRoutes(query string, routes **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnEnumerateRoutes(_p0, routes, result) +} + +func _hcnEnumerateRoutes(query *uint16, routes **uint16, result **uint16) (hr error) { + if hr = procHcnEnumerateSdnRoutes.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnEnumerateSdnRoutes.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(routes)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCreateRoute(id *_guid, settings string, route *hcnRoute, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnCreateRoute(id, _p0, route, result) +} + +func _hcnCreateRoute(id *_guid, settings *uint16, route *hcnRoute, result **uint16) (hr error) { + if hr = procHcnCreateSdnRoute.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnCreateSdnRoute.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(route)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnOpenRoute(id *_guid, route *hcnRoute, result **uint16) (hr error) { + if hr = procHcnOpenSdnRoute.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnOpenSdnRoute.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(route)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnModifyRoute(route hcnRoute, settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnModifyRoute(route, _p0, result) +} + +func _hcnModifyRoute(route hcnRoute, settings *uint16, result **uint16) (hr error) { + if hr = procHcnModifySdnRoute.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnModifySdnRoute.Addr(), 3, uintptr(route), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnQueryRouteProperties(route hcnRoute, query string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnQueryRouteProperties(route, _p0, properties, result) +} + +func _hcnQueryRouteProperties(route hcnRoute, query *uint16, properties **uint16, result **uint16) (hr error) { + if hr = procHcnQuerySdnRouteProperties.Find(); hr != nil { + return + } + r0, _, _ := 
syscall.Syscall6(procHcnQuerySdnRouteProperties.Addr(), 4, uintptr(route), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnDeleteRoute(id *_guid, result **uint16) (hr error) { + if hr = procHcnDeleteSdnRoute.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnDeleteSdnRoute.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCloseRoute(route hcnRoute) (hr error) { + if hr = procHcnCloseSdnRoute.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnCloseSdnRoute.Addr(), 1, uintptr(route), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go new file mode 100644 index 00000000000..4a4fcea843f --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go @@ -0,0 +1,110 @@ +package cni + +import ( + "errors" + + "github.com/Microsoft/go-winio/pkg/guid" + "github.com/Microsoft/hcsshim/internal/regstate" +) + +const ( + cniRoot = "cni" + cniKey = "cfg" +) + +// PersistedNamespaceConfig is the registry version of the `NamespaceID` to UVM +// map. +type PersistedNamespaceConfig struct { + namespaceID string + stored bool + + ContainerID string + HostUniqueID guid.GUID +} + +// NewPersistedNamespaceConfig creates an in-memory namespace config that can be +// persisted to the registry. +func NewPersistedNamespaceConfig(namespaceID, containerID string, containerHostUniqueID guid.GUID) *PersistedNamespaceConfig { + return &PersistedNamespaceConfig{ + namespaceID: namespaceID, + ContainerID: containerID, + HostUniqueID: containerHostUniqueID, + } +} + +// LoadPersistedNamespaceConfig loads a persisted config from the registry that matches +// `namespaceID`. If not found returns `regstate.NotFoundError` +func LoadPersistedNamespaceConfig(namespaceID string) (*PersistedNamespaceConfig, error) { + sk, err := regstate.Open(cniRoot, false) + if err != nil { + return nil, err + } + defer sk.Close() + + pnc := PersistedNamespaceConfig{ + namespaceID: namespaceID, + stored: true, + } + if err := sk.Get(namespaceID, cniKey, &pnc); err != nil { + return nil, err + } + return &pnc, nil +} + +// Store stores or updates the in-memory config to its registry state. If the +// store failes returns the store error. +func (pnc *PersistedNamespaceConfig) Store() error { + if pnc.namespaceID == "" { + return errors.New("invalid namespaceID ''") + } + if pnc.ContainerID == "" { + return errors.New("invalid containerID ''") + } + empty := guid.GUID{} + if pnc.HostUniqueID == empty { + return errors.New("invalid containerHostUniqueID 'empy'") + } + sk, err := regstate.Open(cniRoot, false) + if err != nil { + return err + } + defer sk.Close() + + if pnc.stored { + if err := sk.Set(pnc.namespaceID, cniKey, pnc); err != nil { + return err + } + } else { + if err := sk.Create(pnc.namespaceID, cniKey, pnc); err != nil { + return err + } + } + pnc.stored = true + return nil +} + +// Remove removes any persisted state associated with this config. 
If the config +// is not found in the registery `Remove` returns no error. +func (pnc *PersistedNamespaceConfig) Remove() error { + if pnc.stored { + sk, err := regstate.Open(cniRoot, false) + if err != nil { + if regstate.IsNotFoundError(err) { + pnc.stored = false + return nil + } + return err + } + defer sk.Close() + + if err := sk.Remove(pnc.namespaceID); err != nil { + if regstate.IsNotFoundError(err) { + pnc.stored = false + return nil + } + return err + } + } + pnc.stored = false + return nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go new file mode 100644 index 00000000000..f46af33bb65 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go @@ -0,0 +1,97 @@ +package cow + +import ( + "context" + "io" + + "github.com/Microsoft/hcsshim/internal/hcs/schema1" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" +) + +// Process is the interface for an OS process running in a container or utility VM. +type Process interface { + // Close releases resources associated with the process and closes the + // writer and readers returned by Stdio. Depending on the implementation, + // this may also terminate the process. + Close() error + // CloseStdin causes the process's stdin handle to receive EOF/EPIPE/whatever + // is appropriate to indicate that no more data is available. + CloseStdin(ctx context.Context) error + // CloseStdout closes the stdout connection to the process. It is used to indicate + // that we are done receiving output on the shim side. + CloseStdout(ctx context.Context) error + // CloseStderr closes the stderr connection to the process. It is used to indicate + // that we are done receiving output on the shim side. + CloseStderr(ctx context.Context) error + // Pid returns the process ID. + Pid() int + // Stdio returns the stdio streams for a process. These may be nil if a stream + // was not requested during CreateProcess. + Stdio() (_ io.Writer, _ io.Reader, _ io.Reader) + // ResizeConsole resizes the virtual terminal associated with the process. + ResizeConsole(ctx context.Context, width, height uint16) error + // Kill sends a SIGKILL or equivalent signal to the process and returns whether + // the signal was delivered. It does not wait for the process to terminate. + Kill(ctx context.Context) (bool, error) + // Signal sends a signal to the process and returns whether the signal was + // delivered. The input is OS specific (either + // guestrequest.SignalProcessOptionsWCOW or + // guestrequest.SignalProcessOptionsLCOW). It does not wait for the process + // to terminate. + Signal(ctx context.Context, options interface{}) (bool, error) + // Wait waits for the process to complete, or for a connection to the process to be + // terminated by some error condition (including calling Close). + Wait() error + // ExitCode returns the exit code of the process. Returns an error if the process is + // not running. + ExitCode() (int, error) +} + +// ProcessHost is the interface for creating processes. +type ProcessHost interface { + // CreateProcess creates a process. The configuration is host specific + // (either hcsschema.ProcessParameters or lcow.ProcessParameters). + CreateProcess(ctx context.Context, config interface{}) (Process, error) + // OS returns the host's operating system, "linux" or "windows". + OS() string + // IsOCI specifies whether this is an OCI-compliant process host. 
If true, + // then the configuration passed to CreateProcess should have an OCI process + // spec (or nil if this is the initial process in an OCI container). + // Otherwise, it should have the HCS-specific process parameters. + IsOCI() bool +} + +// Container is the interface for container objects, either running on the host or +// in a utility VM. +type Container interface { + ProcessHost + // Close releases the resources associated with the container. Depending on + // the implementation, this may also terminate the container. + Close() error + // ID returns the container ID. + ID() string + // Properties returns the requested container properties targeting a V1 schema container. + Properties(ctx context.Context, types ...schema1.PropertyType) (*schema1.ContainerProperties, error) + // PropertiesV2 returns the requested container properties targeting a V2 schema container. + PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error) + // Start starts a container. + Start(ctx context.Context) error + // Shutdown sends a shutdown request to the container (but does not wait for + // the shutdown to complete). + Shutdown(ctx context.Context) error + // Terminate sends a terminate request to the container (but does not wait + // for the terminate to complete). + Terminate(ctx context.Context) error + // Wait waits for the container to terminate, or for the connection to the + // container to be terminated by some error condition (including calling + // Close). + Wait() error + // WaitChannel returns the wait channel of the container + WaitChannel() <-chan struct{} + // WaitError returns the container termination error. + // This function should only be called after the channel in WaitChannel() + // is closed. Otherwise it is not thread safe. 
+ WaitError() error + // Modify sends a request to modify container resources + Modify(ctx context.Context, config interface{}) error +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go new file mode 100644 index 00000000000..d13772b0301 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go @@ -0,0 +1,161 @@ +package hcs + +import ( + "fmt" + "sync" + "syscall" + + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/Microsoft/hcsshim/internal/vmcompute" + "github.com/sirupsen/logrus" +) + +var ( + nextCallback uintptr + callbackMap = map[uintptr]*notificationWatcherContext{} + callbackMapLock = sync.RWMutex{} + + notificationWatcherCallback = syscall.NewCallback(notificationWatcher) + + // Notifications for HCS_SYSTEM handles + hcsNotificationSystemExited hcsNotification = 0x00000001 + hcsNotificationSystemCreateCompleted hcsNotification = 0x00000002 + hcsNotificationSystemStartCompleted hcsNotification = 0x00000003 + hcsNotificationSystemPauseCompleted hcsNotification = 0x00000004 + hcsNotificationSystemResumeCompleted hcsNotification = 0x00000005 + hcsNotificationSystemCrashReport hcsNotification = 0x00000006 + hcsNotificationSystemSiloJobCreated hcsNotification = 0x00000007 + hcsNotificationSystemSaveCompleted hcsNotification = 0x00000008 + hcsNotificationSystemRdpEnhancedModeStateChanged hcsNotification = 0x00000009 + hcsNotificationSystemShutdownFailed hcsNotification = 0x0000000A + hcsNotificationSystemGetPropertiesCompleted hcsNotification = 0x0000000B + hcsNotificationSystemModifyCompleted hcsNotification = 0x0000000C + hcsNotificationSystemCrashInitiated hcsNotification = 0x0000000D + hcsNotificationSystemGuestConnectionClosed hcsNotification = 0x0000000E + + // Notifications for HCS_PROCESS handles + hcsNotificationProcessExited hcsNotification = 0x00010000 + + // Common notifications + hcsNotificationInvalid hcsNotification = 0x00000000 + hcsNotificationServiceDisconnect hcsNotification = 0x01000000 +) + +type hcsNotification uint32 + +func (hn hcsNotification) String() string { + switch hn { + case hcsNotificationSystemExited: + return "SystemExited" + case hcsNotificationSystemCreateCompleted: + return "SystemCreateCompleted" + case hcsNotificationSystemStartCompleted: + return "SystemStartCompleted" + case hcsNotificationSystemPauseCompleted: + return "SystemPauseCompleted" + case hcsNotificationSystemResumeCompleted: + return "SystemResumeCompleted" + case hcsNotificationSystemCrashReport: + return "SystemCrashReport" + case hcsNotificationSystemSiloJobCreated: + return "SystemSiloJobCreated" + case hcsNotificationSystemSaveCompleted: + return "SystemSaveCompleted" + case hcsNotificationSystemRdpEnhancedModeStateChanged: + return "SystemRdpEnhancedModeStateChanged" + case hcsNotificationSystemShutdownFailed: + return "SystemShutdownFailed" + case hcsNotificationSystemGetPropertiesCompleted: + return "SystemGetPropertiesCompleted" + case hcsNotificationSystemModifyCompleted: + return "SystemModifyCompleted" + case hcsNotificationSystemCrashInitiated: + return "SystemCrashInitiated" + case hcsNotificationSystemGuestConnectionClosed: + return "SystemGuestConnectionClosed" + case hcsNotificationProcessExited: + return "ProcessExited" + case hcsNotificationInvalid: + return "Invalid" + case hcsNotificationServiceDisconnect: + return "ServiceDisconnect" + default: + return 
fmt.Sprintf("Unknown: %d", hn) + } +} + +type notificationChannel chan error + +type notificationWatcherContext struct { + channels notificationChannels + handle vmcompute.HcsCallback + + systemID string + processID int +} + +type notificationChannels map[hcsNotification]notificationChannel + +func newSystemChannels() notificationChannels { + channels := make(notificationChannels) + for _, notif := range []hcsNotification{ + hcsNotificationServiceDisconnect, + hcsNotificationSystemExited, + hcsNotificationSystemCreateCompleted, + hcsNotificationSystemStartCompleted, + hcsNotificationSystemPauseCompleted, + hcsNotificationSystemResumeCompleted, + hcsNotificationSystemSaveCompleted, + } { + channels[notif] = make(notificationChannel, 1) + } + return channels +} + +func newProcessChannels() notificationChannels { + channels := make(notificationChannels) + for _, notif := range []hcsNotification{ + hcsNotificationServiceDisconnect, + hcsNotificationProcessExited, + } { + channels[notif] = make(notificationChannel, 1) + } + return channels +} + +func closeChannels(channels notificationChannels) { + for _, c := range channels { + close(c) + } +} + +func notificationWatcher(notificationType hcsNotification, callbackNumber uintptr, notificationStatus uintptr, notificationData *uint16) uintptr { + var result error + if int32(notificationStatus) < 0 { + result = interop.Win32FromHresult(notificationStatus) + } + + callbackMapLock.RLock() + context := callbackMap[callbackNumber] + callbackMapLock.RUnlock() + + if context == nil { + return 0 + } + + log := logrus.WithFields(logrus.Fields{ + "notification-type": notificationType.String(), + "system-id": context.systemID, + }) + if context.processID != 0 { + log.Data[logfields.ProcessID] = context.processID + } + log.Debug("HCS notification") + + if channel, ok := context.channels[notificationType]; ok { + channel <- result + } + + return 0 +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go new file mode 100644 index 00000000000..295d4b849c3 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go @@ -0,0 +1,343 @@ +package hcs + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "syscall" + + "github.com/Microsoft/hcsshim/internal/log" +) + +var ( + // ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists + ErrComputeSystemDoesNotExist = syscall.Errno(0xc037010e) + + // ErrElementNotFound is an error encountered when the object being referenced does not exist + ErrElementNotFound = syscall.Errno(0x490) + + // ErrElementNotFound is an error encountered when the object being referenced does not exist + ErrNotSupported = syscall.Errno(0x32) + + // ErrInvalidData is an error encountered when the request being sent to hcs is invalid/unsupported + // decimal -2147024883 / hex 0x8007000d + ErrInvalidData = syscall.Errno(0xd) + + // ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed + ErrHandleClose = errors.New("hcsshim: the handle generating this notification has been closed") + + // ErrAlreadyClosed is an error encountered when using a handle that has been closed by the Close method + ErrAlreadyClosed = errors.New("hcsshim: the handle has already been closed") + + // ErrInvalidNotificationType is an error encountered when an invalid notification type is used + ErrInvalidNotificationType = 
errors.New("hcsshim: invalid notification type") + + // ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation + ErrInvalidProcessState = errors.New("the process is in an invalid state for the attempted operation") + + // ErrTimeout is an error encountered when waiting on a notification times out + ErrTimeout = errors.New("hcsshim: timeout waiting for notification") + + // ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for + // a different expected notification + ErrUnexpectedContainerExit = errors.New("unexpected container exit") + + // ErrUnexpectedProcessAbort is the error encountered when communication with the compute service + // is lost while waiting for a notification + ErrUnexpectedProcessAbort = errors.New("lost communication with compute service") + + // ErrUnexpectedValue is an error encountered when hcs returns an invalid value + ErrUnexpectedValue = errors.New("unexpected value returned from hcs") + + // ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container + ErrVmcomputeAlreadyStopped = syscall.Errno(0xc0370110) + + // ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously + ErrVmcomputeOperationPending = syscall.Errno(0xC0370103) + + // ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation + ErrVmcomputeOperationInvalidState = syscall.Errno(0xc0370105) + + // ErrProcNotFound is an error encountered when a procedure look up fails. + ErrProcNotFound = syscall.Errno(0x7f) + + // ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2 + // builds when the underlying silo might be in the process of terminating. HCS was fixed in RS3. + ErrVmcomputeOperationAccessIsDenied = syscall.Errno(0x5) + + // ErrVmcomputeInvalidJSON is an error encountered when the compute system does not support/understand the messages sent by management + ErrVmcomputeInvalidJSON = syscall.Errno(0xc037010d) + + // ErrVmcomputeUnknownMessage is an error encountered guest compute system doesn't support the message + ErrVmcomputeUnknownMessage = syscall.Errno(0xc037010b) + + // ErrVmcomputeUnexpectedExit is an error encountered when the compute system terminates unexpectedly + ErrVmcomputeUnexpectedExit = syscall.Errno(0xC0370106) + + // ErrNotSupported is an error encountered when hcs doesn't support the request + ErrPlatformNotSupported = errors.New("unsupported platform request") + + // ErrProcessAlreadyStopped is returned by hcs if the process we're trying to kill has already been stopped. + ErrProcessAlreadyStopped = syscall.Errno(0x8037011f) + + // ErrInvalidHandle is an error that can be encountrered when querying the properties of a compute system when the handle to that + // compute system has already been closed. + ErrInvalidHandle = syscall.Errno(0x6) +) + +type ErrorEvent struct { + Message string `json:"Message,omitempty"` // Fully formated error message + StackTrace string `json:"StackTrace,omitempty"` // Stack trace in string form + Provider string `json:"Provider,omitempty"` + EventID uint16 `json:"EventId,omitempty"` + Flags uint32 `json:"Flags,omitempty"` + Source string `json:"Source,omitempty"` + //Data []EventData `json:"Data,omitempty"` // Omit this as HCS doesn't encode this well. It's more confusing to include. 
It is however logged in debug mode (see processHcsResult function) +} + +type hcsResult struct { + Error int32 + ErrorMessage string + ErrorEvents []ErrorEvent `json:"ErrorEvents,omitempty"` +} + +func (ev *ErrorEvent) String() string { + evs := "[Event Detail: " + ev.Message + if ev.StackTrace != "" { + evs += " Stack Trace: " + ev.StackTrace + } + if ev.Provider != "" { + evs += " Provider: " + ev.Provider + } + if ev.EventID != 0 { + evs = fmt.Sprintf("%s EventID: %d", evs, ev.EventID) + } + if ev.Flags != 0 { + evs = fmt.Sprintf("%s flags: %d", evs, ev.Flags) + } + if ev.Source != "" { + evs += " Source: " + ev.Source + } + evs += "]" + return evs +} + +func processHcsResult(ctx context.Context, resultJSON string) []ErrorEvent { + if resultJSON != "" { + result := &hcsResult{} + if err := json.Unmarshal([]byte(resultJSON), result); err != nil { + log.G(ctx).WithError(err).Warning("Could not unmarshal HCS result") + return nil + } + return result.ErrorEvents + } + return nil +} + +type HcsError struct { + Op string + Err error + Events []ErrorEvent +} + +var _ net.Error = &HcsError{} + +func (e *HcsError) Error() string { + s := e.Op + ": " + e.Err.Error() + for _, ev := range e.Events { + s += "\n" + ev.String() + } + return s +} + +func (e *HcsError) Temporary() bool { + err, ok := e.Err.(net.Error) + return ok && err.Temporary() //nolint:staticcheck +} + +func (e *HcsError) Timeout() bool { + err, ok := e.Err.(net.Error) + return ok && err.Timeout() +} + +// ProcessError is an error encountered in HCS during an operation on a Process object +type ProcessError struct { + SystemID string + Pid int + Op string + Err error + Events []ErrorEvent +} + +var _ net.Error = &ProcessError{} + +// SystemError is an error encountered in HCS during an operation on a Container object +type SystemError struct { + ID string + Op string + Err error + Events []ErrorEvent +} + +var _ net.Error = &SystemError{} + +func (e *SystemError) Error() string { + s := e.Op + " " + e.ID + ": " + e.Err.Error() + for _, ev := range e.Events { + s += "\n" + ev.String() + } + return s +} + +func (e *SystemError) Temporary() bool { + err, ok := e.Err.(net.Error) + return ok && err.Temporary() //nolint:staticcheck +} + +func (e *SystemError) Timeout() bool { + err, ok := e.Err.(net.Error) + return ok && err.Timeout() +} + +func makeSystemError(system *System, op string, err error, events []ErrorEvent) error { + // Don't double wrap errors + if _, ok := err.(*SystemError); ok { + return err + } + return &SystemError{ + ID: system.ID(), + Op: op, + Err: err, + Events: events, + } +} + +func (e *ProcessError) Error() string { + s := fmt.Sprintf("%s %s:%d: %s", e.Op, e.SystemID, e.Pid, e.Err.Error()) + for _, ev := range e.Events { + s += "\n" + ev.String() + } + return s +} + +func (e *ProcessError) Temporary() bool { + err, ok := e.Err.(net.Error) + return ok && err.Temporary() //nolint:staticcheck +} + +func (e *ProcessError) Timeout() bool { + err, ok := e.Err.(net.Error) + return ok && err.Timeout() +} + +func makeProcessError(process *Process, op string, err error, events []ErrorEvent) error { + // Don't double wrap errors + if _, ok := err.(*ProcessError); ok { + return err + } + return &ProcessError{ + Pid: process.Pid(), + SystemID: process.SystemID(), + Op: op, + Err: err, + Events: events, + } +} + +// IsNotExist checks if an error is caused by the Container or Process not existing. +// Note: Currently, ErrElementNotFound can mean that a Process has either +// already exited, or does not exist. 
Both IsAlreadyStopped and IsNotExist +// will currently return true when the error is ErrElementNotFound. +func IsNotExist(err error) bool { + err = getInnerError(err) + return err == ErrComputeSystemDoesNotExist || + err == ErrElementNotFound +} + +// IsErrorInvalidHandle checks whether the error is the result of an operation carried +// out on a handle that is invalid/closed. This error popped up while trying to query +// stats on a container in the process of being stopped. +func IsErrorInvalidHandle(err error) bool { + err = getInnerError(err) + return err == ErrInvalidHandle +} + +// IsAlreadyClosed checks if an error is caused by the Container or Process having been +// already closed by a call to the Close() method. +func IsAlreadyClosed(err error) bool { + err = getInnerError(err) + return err == ErrAlreadyClosed +} + +// IsPending returns a boolean indicating whether the error is that +// the requested operation is being completed in the background. +func IsPending(err error) bool { + err = getInnerError(err) + return err == ErrVmcomputeOperationPending +} + +// IsTimeout returns a boolean indicating whether the error is caused by +// a timeout waiting for the operation to complete. +func IsTimeout(err error) bool { + if err, ok := err.(net.Error); ok && err.Timeout() { + return true + } + err = getInnerError(err) + return err == ErrTimeout +} + +// IsAlreadyStopped returns a boolean indicating whether the error is caused by +// a Container or Process being already stopped. +// Note: Currently, ErrElementNotFound can mean that a Process has either +// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist +// will currently return true when the error is ErrElementNotFound. +func IsAlreadyStopped(err error) bool { + err = getInnerError(err) + return err == ErrVmcomputeAlreadyStopped || + err == ErrProcessAlreadyStopped || + err == ErrElementNotFound +} + +// IsNotSupported returns a boolean indicating whether the error is caused by +// unsupported platform requests +// Note: Currently Unsupported platform requests can be mean either +// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or ErrVmcomputeUnknownMessage +// is thrown from the Platform +func IsNotSupported(err error) bool { + err = getInnerError(err) + // If Platform doesn't recognize or support the request sent, below errors are seen + return err == ErrVmcomputeInvalidJSON || + err == ErrInvalidData || + err == ErrNotSupported || + err == ErrVmcomputeUnknownMessage +} + +// IsOperationInvalidState returns true when err is caused by +// `ErrVmcomputeOperationInvalidState`. +func IsOperationInvalidState(err error) bool { + err = getInnerError(err) + return err == ErrVmcomputeOperationInvalidState +} + +// IsAccessIsDenied returns true when err is caused by +// `ErrVmcomputeOperationAccessIsDenied`. 
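The classification helpers in this file (IsNotExist, IsAlreadyStopped, IsPending, and IsAccessIsDenied just below) all unwrap the error through getInnerError before comparing it against the sentinel values declared at the top of errors.go. A minimal sketch of how code in the same package might lean on them during teardown; the helper name is a hypothetical addition, not anything defined in hcsshim:

// ignoreIfGone is a hypothetical convenience wrapper, not part of hcsshim:
// teardown paths usually want to treat "no longer exists" and "already
// stopped" as success rather than surfacing an error to the caller.
func ignoreIfGone(err error) error {
	if err == nil || IsNotExist(err) || IsAlreadyStopped(err) {
		return nil
	}
	return err
}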
+func IsAccessIsDenied(err error) bool { + err = getInnerError(err) + return err == ErrVmcomputeOperationAccessIsDenied +} + +func getInnerError(err error) error { + switch pe := err.(type) { + case nil: + return nil + case *HcsError: + err = pe.Err + case *SystemError: + err = pe.Err + case *ProcessError: + err = pe.Err + } + return err +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go new file mode 100644 index 00000000000..78490d6cdd2 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go @@ -0,0 +1,589 @@ +package hcs + +import ( + "context" + "encoding/json" + "errors" + "io" + "os" + "sync" + "syscall" + "time" + + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/internal/vmcompute" + "go.opencensus.io/trace" +) + +// ContainerError is an error encountered in HCS +type Process struct { + handleLock sync.RWMutex + handle vmcompute.HcsProcess + processID int + system *System + hasCachedStdio bool + stdioLock sync.Mutex + stdin io.WriteCloser + stdout io.ReadCloser + stderr io.ReadCloser + callbackNumber uintptr + killSignalDelivered bool + + closedWaitOnce sync.Once + waitBlock chan struct{} + exitCode int + waitError error +} + +func newProcess(process vmcompute.HcsProcess, processID int, computeSystem *System) *Process { + return &Process{ + handle: process, + processID: processID, + system: computeSystem, + waitBlock: make(chan struct{}), + } +} + +type processModifyRequest struct { + Operation string + ConsoleSize *consoleSize `json:",omitempty"` + CloseHandle *closeHandle `json:",omitempty"` +} + +type consoleSize struct { + Height uint16 + Width uint16 +} + +type closeHandle struct { + Handle string +} + +type processStatus struct { + ProcessID uint32 + Exited bool + ExitCode uint32 + LastWaitResult int32 +} + +const stdIn string = "StdIn" + +const ( + modifyConsoleSize string = "ConsoleSize" + modifyCloseHandle string = "CloseHandle" +) + +// Pid returns the process ID of the process within the container. +func (process *Process) Pid() int { + return process.processID +} + +// SystemID returns the ID of the process's compute system. +func (process *Process) SystemID() string { + return process.system.ID() +} + +func (process *Process) processSignalResult(ctx context.Context, err error) (bool, error) { + switch err { + case nil: + return true, nil + case ErrVmcomputeOperationInvalidState, ErrComputeSystemDoesNotExist, ErrElementNotFound: + select { + case <-process.waitBlock: + // The process exit notification has already arrived. + default: + // The process should be gone, but we have not received the notification. + // After a second, force unblock the process wait to work around a possible + // deadlock in the HCS. + go func() { + time.Sleep(time.Second) + process.closedWaitOnce.Do(func() { + log.G(ctx).WithError(err).Warn("force unblocking process waits") + process.exitCode = -1 + process.waitError = err + close(process.waitBlock) + }) + }() + } + return false, nil + default: + return false, err + } +} + +// Signal signals the process with `options`. +// +// For LCOW `guestrequest.SignalProcessOptionsLCOW`. +// +// For WCOW `guestrequest.SignalProcessOptionsWCOW`. 
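Signal, defined next, JSON-encodes whatever options value it is given and hands the string to vmcompute.HcsSignalProcess; the concrete option types live in hcsshim's guestrequest package and are not shown in this patch. A hedged caller-side sketch, assuming it sits in the same package as Process and that the LCOW option struct carries a single Signal field (that field name and JSON tag are assumptions, not taken from this code):

// stopProcess is a hypothetical helper: it tries a graceful signal first and
// falls back to Kill. The anonymous struct stands in for
// guestrequest.SignalProcessOptionsLCOW; its shape is assumed.
func stopProcess(ctx context.Context, p *Process) error {
	opts := struct {
		Signal int `json:"Signal,omitempty"`
	}{Signal: 15} // SIGTERM
	if delivered, err := p.Signal(ctx, opts); err == nil && delivered {
		return nil
	}
	// Kill, defined further below, only requests termination and never
	// waits; callers still need Wait() to observe the exit.
	_, err := p.Kill(ctx)
	return err
}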
+func (process *Process) Signal(ctx context.Context, options interface{}) (bool, error) { + process.handleLock.RLock() + defer process.handleLock.RUnlock() + + operation := "hcs::Process::Signal" + + if process.handle == 0 { + return false, makeProcessError(process, operation, ErrAlreadyClosed, nil) + } + + optionsb, err := json.Marshal(options) + if err != nil { + return false, err + } + + resultJSON, err := vmcompute.HcsSignalProcess(ctx, process.handle, string(optionsb)) + events := processHcsResult(ctx, resultJSON) + delivered, err := process.processSignalResult(ctx, err) + if err != nil { + err = makeProcessError(process, operation, err, events) + } + return delivered, err +} + +// Kill signals the process to terminate but does not wait for it to finish terminating. +func (process *Process) Kill(ctx context.Context) (bool, error) { + process.handleLock.RLock() + defer process.handleLock.RUnlock() + + operation := "hcs::Process::Kill" + + if process.handle == 0 { + return false, makeProcessError(process, operation, ErrAlreadyClosed, nil) + } + + if process.killSignalDelivered { + // A kill signal has already been sent to this process. Sending a second + // one offers no real benefit, as processes cannot stop themselves from + // being terminated, once a TerminateProcess has been issued. Sending a + // second kill may result in a number of errors (two of which detailed bellow) + // and which we can avoid handling. + return true, nil + } + + // HCS serializes the signals sent to a target pid per compute system handle. + // To avoid SIGKILL being serialized behind other signals, we open a new compute + // system handle to deliver the kill signal. + // If the calls to opening a new compute system handle fail, we forcefully + // terminate the container itself so that no container is left behind + hcsSystem, err := OpenComputeSystem(ctx, process.system.id) + if err != nil { + // log error and force termination of container + log.G(ctx).WithField("err", err).Error("OpenComputeSystem() call failed") + err = process.system.Terminate(ctx) + // if the Terminate() call itself ever failed, log and return error + if err != nil { + log.G(ctx).WithField("err", err).Error("Terminate() call failed") + return false, err + } + process.system.Close() + return true, nil + } + defer hcsSystem.Close() + + newProcessHandle, err := hcsSystem.OpenProcess(ctx, process.Pid()) + if err != nil { + // Return true only if the target process has either already + // exited, or does not exist. + if IsAlreadyStopped(err) { + return true, nil + } else { + return false, err + } + } + defer newProcessHandle.Close() + + resultJSON, err := vmcompute.HcsTerminateProcess(ctx, newProcessHandle.handle) + if err != nil { + // We still need to check these two cases, as processes may still be killed by an + // external actor (human operator, OOM, random script etc). + if errors.Is(err, os.ErrPermission) || IsAlreadyStopped(err) { + // There are two cases where it should be safe to ignore an error returned + // by HcsTerminateProcess. The first one is cause by the fact that + // HcsTerminateProcess ends up calling TerminateProcess in the context + // of a container. According to the TerminateProcess documentation: + // https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-terminateprocess#remarks + // After a process has terminated, call to TerminateProcess with open + // handles to the process fails with ERROR_ACCESS_DENIED (5) error code. + // It's safe to ignore this error here. 
HCS should always have permissions + // to kill processes inside any container. So an ERROR_ACCESS_DENIED + // is unlikely to be anything else than what the ending remarks in the + // documentation states. + // + // The second case is generated by hcs itself, if for any reason HcsTerminateProcess + // is called twice in a very short amount of time. In such cases, hcs may return + // HCS_E_PROCESS_ALREADY_STOPPED. + return true, nil + } + } + events := processHcsResult(ctx, resultJSON) + delivered, err := newProcessHandle.processSignalResult(ctx, err) + if err != nil { + err = makeProcessError(newProcessHandle, operation, err, events) + } + + process.killSignalDelivered = delivered + return delivered, err +} + +// waitBackground waits for the process exit notification. Once received sets +// `process.waitError` (if any) and unblocks all `Wait` calls. +// +// This MUST be called exactly once per `process.handle` but `Wait` is safe to +// call multiple times. +func (process *Process) waitBackground() { + operation := "hcs::Process::waitBackground" + ctx, span := trace.StartSpan(context.Background(), operation) + defer span.End() + span.AddAttributes( + trace.StringAttribute("cid", process.SystemID()), + trace.Int64Attribute("pid", int64(process.processID))) + + var ( + err error + exitCode = -1 + propertiesJSON string + resultJSON string + ) + + err = waitForNotification(ctx, process.callbackNumber, hcsNotificationProcessExited, nil) + if err != nil { + err = makeProcessError(process, operation, err, nil) + log.G(ctx).WithError(err).Error("failed wait") + } else { + process.handleLock.RLock() + defer process.handleLock.RUnlock() + + // Make sure we didnt race with Close() here + if process.handle != 0 { + propertiesJSON, resultJSON, err = vmcompute.HcsGetProcessProperties(ctx, process.handle) + events := processHcsResult(ctx, resultJSON) + if err != nil { + err = makeProcessError(process, operation, err, events) //nolint:ineffassign + } else { + properties := &processStatus{} + err = json.Unmarshal([]byte(propertiesJSON), properties) + if err != nil { + err = makeProcessError(process, operation, err, nil) //nolint:ineffassign + } else { + if properties.LastWaitResult != 0 { + log.G(ctx).WithField("wait-result", properties.LastWaitResult).Warning("non-zero last wait result") + } else { + exitCode = int(properties.ExitCode) + } + } + } + } + } + log.G(ctx).WithField("exitCode", exitCode).Debug("process exited") + + process.closedWaitOnce.Do(func() { + process.exitCode = exitCode + process.waitError = err + close(process.waitBlock) + }) + oc.SetSpanStatus(span, err) +} + +// Wait waits for the process to exit. If the process has already exited returns +// the pervious error (if any). +func (process *Process) Wait() error { + <-process.waitBlock + return process.waitError +} + +// ResizeConsole resizes the console of the process. 
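ResizeConsole, defined next, wraps the new dimensions in the unexported processModifyRequest and consoleSize structs declared earlier in this file and sends the resulting JSON to vmcompute.HcsModifyProcess. A self-contained sketch using local mirrors of those structs, purely to show the wire format the call produces (the mirrors are illustrative copies, not the package's own types):

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the unexported consoleSize/processModifyRequest types
// above, reproduced only to show the JSON that HcsModifyProcess receives.
type consoleSize struct {
	Height uint16
	Width  uint16
}

type processModifyRequest struct {
	Operation   string
	ConsoleSize *consoleSize `json:",omitempty"`
}

func main() {
	req := processModifyRequest{
		Operation:   "ConsoleSize",
		ConsoleSize: &consoleSize{Height: 25, Width: 80},
	}
	b, _ := json.Marshal(req)
	// Prints: {"Operation":"ConsoleSize","ConsoleSize":{"Height":25,"Width":80}}
	fmt.Println(string(b))
}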
+func (process *Process) ResizeConsole(ctx context.Context, width, height uint16) error { + process.handleLock.RLock() + defer process.handleLock.RUnlock() + + operation := "hcs::Process::ResizeConsole" + + if process.handle == 0 { + return makeProcessError(process, operation, ErrAlreadyClosed, nil) + } + + modifyRequest := processModifyRequest{ + Operation: modifyConsoleSize, + ConsoleSize: &consoleSize{ + Height: height, + Width: width, + }, + } + + modifyRequestb, err := json.Marshal(modifyRequest) + if err != nil { + return err + } + + resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return makeProcessError(process, operation, err, events) + } + + return nil +} + +// ExitCode returns the exit code of the process. The process must have +// already terminated. +func (process *Process) ExitCode() (int, error) { + select { + case <-process.waitBlock: + if process.waitError != nil { + return -1, process.waitError + } + return process.exitCode, nil + default: + return -1, makeProcessError(process, "hcs::Process::ExitCode", ErrInvalidProcessState, nil) + } +} + +// StdioLegacy returns the stdin, stdout, and stderr pipes, respectively. Closing +// these pipes does not close the underlying pipes. Once returned, these pipes +// are the responsibility of the caller to close. +func (process *Process) StdioLegacy() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadCloser, err error) { + operation := "hcs::Process::StdioLegacy" + ctx, span := trace.StartSpan(context.Background(), operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("cid", process.SystemID()), + trace.Int64Attribute("pid", int64(process.processID))) + + process.handleLock.RLock() + defer process.handleLock.RUnlock() + + if process.handle == 0 { + return nil, nil, nil, makeProcessError(process, operation, ErrAlreadyClosed, nil) + } + + process.stdioLock.Lock() + defer process.stdioLock.Unlock() + if process.hasCachedStdio { + stdin, stdout, stderr := process.stdin, process.stdout, process.stderr + process.stdin, process.stdout, process.stderr = nil, nil, nil + process.hasCachedStdio = false + return stdin, stdout, stderr, nil + } + + processInfo, resultJSON, err := vmcompute.HcsGetProcessInfo(ctx, process.handle) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, nil, nil, makeProcessError(process, operation, err, events) + } + + pipes, err := makeOpenFiles([]syscall.Handle{processInfo.StdInput, processInfo.StdOutput, processInfo.StdError}) + if err != nil { + return nil, nil, nil, makeProcessError(process, operation, err, nil) + } + + return pipes[0], pipes[1], pipes[2], nil +} + +// Stdio returns the stdin, stdout, and stderr pipes, respectively. +// To close them, close the process handle. +func (process *Process) Stdio() (stdin io.Writer, stdout, stderr io.Reader) { + process.stdioLock.Lock() + defer process.stdioLock.Unlock() + return process.stdin, process.stdout, process.stderr +} + +// CloseStdin closes the write side of the stdin pipe so that the process is +// notified on the read side that there is no more data in stdin. 
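CloseStdin, defined next, issues a CloseHandle modify request for the StdIn handle and then closes the locally cached writer. A hypothetical usage sketch, assuming it lives alongside Process in this package (so the context, errors, and io imports above are available): write the input through the writer returned by Stdio, then call CloseStdin so the in-container process observes EOF.

// feedInput is a hypothetical helper: it streams input to the process's
// stdin and then signals EOF. Stdio returns a nil writer if stdin was not
// requested at CreateProcess time, so that case is checked first.
func feedInput(ctx context.Context, p *Process, input string) error {
	stdin, _, _ := p.Stdio()
	if stdin == nil {
		return errors.New("process was created without a stdin pipe")
	}
	if _, err := io.WriteString(stdin, input); err != nil {
		return err
	}
	// Close the write side so the guest sees EOF on its stdin.
	return p.CloseStdin(ctx)
}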
+func (process *Process) CloseStdin(ctx context.Context) error { + process.handleLock.RLock() + defer process.handleLock.RUnlock() + + operation := "hcs::Process::CloseStdin" + + if process.handle == 0 { + return makeProcessError(process, operation, ErrAlreadyClosed, nil) + } + + modifyRequest := processModifyRequest{ + Operation: modifyCloseHandle, + CloseHandle: &closeHandle{ + Handle: stdIn, + }, + } + + modifyRequestb, err := json.Marshal(modifyRequest) + if err != nil { + return err + } + + resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return makeProcessError(process, operation, err, events) + } + + process.stdioLock.Lock() + if process.stdin != nil { + process.stdin.Close() + process.stdin = nil + } + process.stdioLock.Unlock() + + return nil +} + +func (process *Process) CloseStdout(ctx context.Context) (err error) { + ctx, span := trace.StartSpan(ctx, "hcs::Process::CloseStdout") //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("cid", process.SystemID()), + trace.Int64Attribute("pid", int64(process.processID))) + + process.handleLock.Lock() + defer process.handleLock.Unlock() + + if process.handle == 0 { + return nil + } + + process.stdioLock.Lock() + defer process.stdioLock.Unlock() + if process.stdout != nil { + process.stdout.Close() + process.stdout = nil + } + return nil +} + +func (process *Process) CloseStderr(ctx context.Context) (err error) { + ctx, span := trace.StartSpan(ctx, "hcs::Process::CloseStderr") //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("cid", process.SystemID()), + trace.Int64Attribute("pid", int64(process.processID))) + + process.handleLock.Lock() + defer process.handleLock.Unlock() + + if process.handle == 0 { + return nil + } + + process.stdioLock.Lock() + defer process.stdioLock.Unlock() + if process.stderr != nil { + process.stderr.Close() + process.stderr = nil + + } + return nil +} + +// Close cleans up any state associated with the process but does not kill +// or wait on it. 
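Close, defined next, only unregisters the notification callback and releases the HCS handle; it neither terminates the process nor waits for it, so callers are expected to sequence Wait, ExitCode, and Close themselves. A minimal illustrative sketch of that lifecycle, using only methods shown in this file (the function name is hypothetical, not part of hcsshim):

// runToCompletion illustrates the intended lifecycle: wait for the exit
// notification, read the exit code, then release the handle.
func runToCompletion(p *Process) (int, error) {
	defer p.Close() // releases the HCS handle; does not kill or wait

	if err := p.Wait(); err != nil {
		return -1, err
	}
	// ExitCode only succeeds once the process has terminated, which the
	// Wait call above has just guaranteed.
	return p.ExitCode()
}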
+func (process *Process) Close() (err error) { + operation := "hcs::Process::Close" + ctx, span := trace.StartSpan(context.Background(), operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("cid", process.SystemID()), + trace.Int64Attribute("pid", int64(process.processID))) + + process.handleLock.Lock() + defer process.handleLock.Unlock() + + // Don't double free this + if process.handle == 0 { + return nil + } + + process.stdioLock.Lock() + if process.stdin != nil { + process.stdin.Close() + process.stdin = nil + } + if process.stdout != nil { + process.stdout.Close() + process.stdout = nil + } + if process.stderr != nil { + process.stderr.Close() + process.stderr = nil + } + process.stdioLock.Unlock() + + if err = process.unregisterCallback(ctx); err != nil { + return makeProcessError(process, operation, err, nil) + } + + if err = vmcompute.HcsCloseProcess(ctx, process.handle); err != nil { + return makeProcessError(process, operation, err, nil) + } + + process.handle = 0 + process.closedWaitOnce.Do(func() { + process.exitCode = -1 + process.waitError = ErrAlreadyClosed + close(process.waitBlock) + }) + + return nil +} + +func (process *Process) registerCallback(ctx context.Context) error { + callbackContext := ¬ificationWatcherContext{ + channels: newProcessChannels(), + systemID: process.SystemID(), + processID: process.processID, + } + + callbackMapLock.Lock() + callbackNumber := nextCallback + nextCallback++ + callbackMap[callbackNumber] = callbackContext + callbackMapLock.Unlock() + + callbackHandle, err := vmcompute.HcsRegisterProcessCallback(ctx, process.handle, notificationWatcherCallback, callbackNumber) + if err != nil { + return err + } + callbackContext.handle = callbackHandle + process.callbackNumber = callbackNumber + + return nil +} + +func (process *Process) unregisterCallback(ctx context.Context) error { + callbackNumber := process.callbackNumber + + callbackMapLock.RLock() + callbackContext := callbackMap[callbackNumber] + callbackMapLock.RUnlock() + + if callbackContext == nil { + return nil + } + + handle := callbackContext.handle + + if handle == 0 { + return nil + } + + // vmcompute.HcsUnregisterProcessCallback has its own synchronization to + // wait for all callbacks to complete. We must NOT hold the callbackMapLock. 
+ err := vmcompute.HcsUnregisterProcessCallback(ctx, handle) + if err != nil { + return err + } + + closeChannels(callbackContext.channels) + + callbackMapLock.Lock() + delete(callbackMap, callbackNumber) + callbackMapLock.Unlock() + + handle = 0 //nolint:ineffassign + + return nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go new file mode 100644 index 00000000000..b621c559388 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go @@ -0,0 +1,250 @@ +package schema1 + +import ( + "encoding/json" + "time" + + "github.com/Microsoft/go-winio/pkg/guid" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" +) + +// ProcessConfig is used as both the input of Container.CreateProcess +// and to convert the parameters to JSON for passing onto the HCS +type ProcessConfig struct { + ApplicationName string `json:",omitempty"` + CommandLine string `json:",omitempty"` + CommandArgs []string `json:",omitempty"` // Used by Linux Containers on Windows + User string `json:",omitempty"` + WorkingDirectory string `json:",omitempty"` + Environment map[string]string `json:",omitempty"` + EmulateConsole bool `json:",omitempty"` + CreateStdInPipe bool `json:",omitempty"` + CreateStdOutPipe bool `json:",omitempty"` + CreateStdErrPipe bool `json:",omitempty"` + ConsoleSize [2]uint `json:",omitempty"` + CreateInUtilityVm bool `json:",omitempty"` // Used by Linux Containers on Windows + OCISpecification *json.RawMessage `json:",omitempty"` // Used by Linux Containers on Windows +} + +type Layer struct { + ID string + Path string +} + +type MappedDir struct { + HostPath string + ContainerPath string + ReadOnly bool + BandwidthMaximum uint64 + IOPSMaximum uint64 + CreateInUtilityVM bool + // LinuxMetadata - Support added in 1803/RS4+. + LinuxMetadata bool `json:",omitempty"` +} + +type MappedPipe struct { + HostPath string + ContainerPipeName string +} + +type HvRuntime struct { + ImagePath string `json:",omitempty"` + SkipTemplate bool `json:",omitempty"` + LinuxInitrdFile string `json:",omitempty"` // File under ImagePath on host containing an initrd image for starting a Linux utility VM + LinuxKernelFile string `json:",omitempty"` // File under ImagePath on host containing a kernel for starting a Linux utility VM + LinuxBootParameters string `json:",omitempty"` // Additional boot parameters for starting a Linux Utility VM in initrd mode + BootSource string `json:",omitempty"` // "Vhd" for Linux Utility VM booting from VHD + WritableBootSource bool `json:",omitempty"` // Linux Utility VM booting from VHD +} + +type MappedVirtualDisk struct { + HostPath string `json:",omitempty"` // Path to VHD on the host + ContainerPath string // Platform-specific mount point path in the container + CreateInUtilityVM bool `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + Cache string `json:",omitempty"` // "" (Unspecified); "Disabled"; "Enabled"; "Private"; "PrivateAllowSharing" + AttachOnly bool `json:",omitempty"` +} + +// AssignedDevice represents a device that has been directly assigned to a container +// +// NOTE: Support added in RS5 +type AssignedDevice struct { + // InterfaceClassGUID of the device to assign to container. 
+ InterfaceClassGUID string `json:"InterfaceClassGuid,omitempty"` +} + +// ContainerConfig is used as both the input of CreateContainer +// and to convert the parameters to JSON for passing onto the HCS +type ContainerConfig struct { + SystemType string // HCS requires this to be hard-coded to "Container" + Name string // Name of the container. We use the docker ID. + Owner string `json:",omitempty"` // The management platform that created this container + VolumePath string `json:",omitempty"` // Windows volume path for scratch space. Used by Windows Server Containers only. Format \\?\\Volume{GUID} + IgnoreFlushesDuringBoot bool `json:",omitempty"` // Optimization hint for container startup in Windows + LayerFolderPath string `json:",omitempty"` // Where the layer folders are located. Used by Windows Server Containers only. Format %root%\windowsfilter\containerID + Layers []Layer // List of storage layers. Required for Windows Server and Hyper-V Containers. Format ID=GUID;Path=%root%\windowsfilter\layerID + Credentials string `json:",omitempty"` // Credentials information + ProcessorCount uint32 `json:",omitempty"` // Number of processors to assign to the container. + ProcessorWeight uint64 `json:",omitempty"` // CPU shares (relative weight to other containers with cpu shares). Range is from 1 to 10000. A value of 0 results in default shares. + ProcessorMaximum int64 `json:",omitempty"` // Specifies the portion of processor cycles that this container can use as a percentage times 100. Range is from 1 to 10000. A value of 0 results in no limit. + StorageIOPSMaximum uint64 `json:",omitempty"` // Maximum Storage IOPS + StorageBandwidthMaximum uint64 `json:",omitempty"` // Maximum Storage Bandwidth in bytes per second + StorageSandboxSize uint64 `json:",omitempty"` // Size in bytes that the container system drive should be expanded to if smaller + MemoryMaximumInMB int64 `json:",omitempty"` // Maximum memory available to the container in Megabytes + HostName string `json:",omitempty"` // Hostname + MappedDirectories []MappedDir `json:",omitempty"` // List of mapped directories (volumes/mounts) + MappedPipes []MappedPipe `json:",omitempty"` // List of mapped Windows named pipes + HvPartition bool // True if it a Hyper-V Container + NetworkSharedContainerName string `json:",omitempty"` // Name (ID) of the container that we will share the network stack with. + EndpointList []string `json:",omitempty"` // List of networking endpoints to be attached to container + HvRuntime *HvRuntime `json:",omitempty"` // Hyper-V container settings. Used by Hyper-V containers only. Format ImagePath=%root%\BaseLayerID\UtilityVM + Servicing bool `json:",omitempty"` // True if this container is for servicing + AllowUnqualifiedDNSQuery bool `json:",omitempty"` // True to allow unqualified DNS name resolution + DNSSearchList string `json:",omitempty"` // Comma seperated list of DNS suffixes to use for name resolution + ContainerType string `json:",omitempty"` // "Linux" for Linux containers on Windows. Omitted otherwise. + TerminateOnLastHandleClosed bool `json:",omitempty"` // Should HCS terminate the container once all handles have been closed + MappedVirtualDisks []MappedVirtualDisk `json:",omitempty"` // Array of virtual disks to mount at start + AssignedDevices []AssignedDevice `json:",omitempty"` // Array of devices to assign. 
NOTE: Support added in RS5 +} + +type ComputeSystemQuery struct { + IDs []string `json:"Ids,omitempty"` + Types []string `json:",omitempty"` + Names []string `json:",omitempty"` + Owners []string `json:",omitempty"` +} + +type PropertyType string + +const ( + PropertyTypeStatistics PropertyType = "Statistics" // V1 and V2 + PropertyTypeProcessList PropertyType = "ProcessList" // V1 and V2 + PropertyTypeMappedVirtualDisk PropertyType = "MappedVirtualDisk" // Not supported in V2 schema call + PropertyTypeGuestConnection PropertyType = "GuestConnection" // V1 and V2. Nil return from HCS before RS5 +) + +type PropertyQuery struct { + PropertyTypes []PropertyType `json:",omitempty"` +} + +// ContainerProperties holds the properties for a container and the processes running in that container +type ContainerProperties struct { + ID string `json:"Id"` + State string + Name string + SystemType string + RuntimeOSType string `json:"RuntimeOsType,omitempty"` + Owner string + SiloGUID string `json:"SiloGuid,omitempty"` + RuntimeID guid.GUID `json:"RuntimeId,omitempty"` + IsRuntimeTemplate bool `json:",omitempty"` + RuntimeImagePath string `json:",omitempty"` + Stopped bool `json:",omitempty"` + ExitType string `json:",omitempty"` + AreUpdatesPending bool `json:",omitempty"` + ObRoot string `json:",omitempty"` + Statistics Statistics `json:",omitempty"` + ProcessList []ProcessListItem `json:",omitempty"` + MappedVirtualDiskControllers map[int]MappedVirtualDiskController `json:",omitempty"` + GuestConnectionInfo GuestConnectionInfo `json:",omitempty"` +} + +// MemoryStats holds the memory statistics for a container +type MemoryStats struct { + UsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"` + UsageCommitPeakBytes uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"` + UsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"` +} + +// ProcessorStats holds the processor statistics for a container +type ProcessorStats struct { + TotalRuntime100ns uint64 `json:",omitempty"` + RuntimeUser100ns uint64 `json:",omitempty"` + RuntimeKernel100ns uint64 `json:",omitempty"` +} + +// StorageStats holds the storage statistics for a container +type StorageStats struct { + ReadCountNormalized uint64 `json:",omitempty"` + ReadSizeBytes uint64 `json:",omitempty"` + WriteCountNormalized uint64 `json:",omitempty"` + WriteSizeBytes uint64 `json:",omitempty"` +} + +// NetworkStats holds the network statistics for a container +type NetworkStats struct { + BytesReceived uint64 `json:",omitempty"` + BytesSent uint64 `json:",omitempty"` + PacketsReceived uint64 `json:",omitempty"` + PacketsSent uint64 `json:",omitempty"` + DroppedPacketsIncoming uint64 `json:",omitempty"` + DroppedPacketsOutgoing uint64 `json:",omitempty"` + EndpointId string `json:",omitempty"` + InstanceId string `json:",omitempty"` +} + +// Statistics is the structure returned by a statistics call on a container +type Statistics struct { + Timestamp time.Time `json:",omitempty"` + ContainerStartTime time.Time `json:",omitempty"` + Uptime100ns uint64 `json:",omitempty"` + Memory MemoryStats `json:",omitempty"` + Processor ProcessorStats `json:",omitempty"` + Storage StorageStats `json:",omitempty"` + Network []NetworkStats `json:",omitempty"` +} + +// ProcessList is the structure of an item returned by a ProcessList call on a container +type ProcessListItem struct { + CreateTimestamp time.Time `json:",omitempty"` + ImageName string `json:",omitempty"` + KernelTime100ns uint64 `json:",omitempty"` + MemoryCommitBytes 
uint64 `json:",omitempty"` + MemoryWorkingSetPrivateBytes uint64 `json:",omitempty"` + MemoryWorkingSetSharedBytes uint64 `json:",omitempty"` + ProcessId uint32 `json:",omitempty"` + UserTime100ns uint64 `json:",omitempty"` +} + +// MappedVirtualDiskController is the structure of an item returned by a MappedVirtualDiskList call on a container +type MappedVirtualDiskController struct { + MappedVirtualDisks map[int]MappedVirtualDisk `json:",omitempty"` +} + +// GuestDefinedCapabilities is part of the GuestConnectionInfo returned by a GuestConnection call on a utility VM +type GuestDefinedCapabilities struct { + NamespaceAddRequestSupported bool `json:",omitempty"` + SignalProcessSupported bool `json:",omitempty"` + DumpStacksSupported bool `json:",omitempty"` + DeleteContainerStateSupported bool `json:",omitempty"` + UpdateContainerSupported bool `json:",omitempty"` +} + +// GuestConnectionInfo is the structure of an item returned by a GuestConnection call on a utility VM +type GuestConnectionInfo struct { + SupportedSchemaVersions []hcsschema.Version `json:",omitempty"` + ProtocolVersion uint32 `json:",omitempty"` + GuestDefinedCapabilities GuestDefinedCapabilities `json:",omitempty"` +} + +// Type of Request Support in ModifySystem +type RequestType string + +// Type of Resource Support in ModifySystem +type ResourceType string + +// RequestType const +const ( + Add RequestType = "Add" + Remove RequestType = "Remove" + Network ResourceType = "Network" +) + +// ResourceModificationRequestResponse is the structure used to send a request to the container to modify the system +// Supported resource types are Network and Request Types are Add/Remove +type ResourceModificationRequestResponse struct { + Resource ResourceType `json:"ResourceType"` + Data interface{} `json:"Settings"` + Request RequestType `json:"RequestType,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go new file mode 100644 index 00000000000..70884aad75f --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go @@ -0,0 +1,36 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Attachment struct { + Type_ string `json:"Type,omitempty"` + + Path string `json:"Path,omitempty"` + + IgnoreFlushes bool `json:"IgnoreFlushes,omitempty"` + + CachingMode string `json:"CachingMode,omitempty"` + + NoWriteHardening bool `json:"NoWriteHardening,omitempty"` + + DisableExpansionOptimization bool `json:"DisableExpansionOptimization,omitempty"` + + IgnoreRelativeLocator bool `json:"IgnoreRelativeLocator,omitempty"` + + CaptureIoAttributionContext bool `json:"CaptureIoAttributionContext,omitempty"` + + ReadOnly bool `json:"ReadOnly,omitempty"` + + SupportCompressedVolumes bool `json:"SupportCompressedVolumes,omitempty"` + + AlwaysAllowSparseFiles bool `json:"AlwaysAllowSparseFiles,omitempty"` + + ExtensibleVirtualDiskType string `json:"ExtensibleVirtualDiskType,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go new file mode 100644 index 00000000000..ecbbed4c233 --- /dev/null +++
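As a quick illustration of the schema1 modify-call shape above: a minimal, self-contained sketch that marshals a ResourceModificationRequestResponse. The two types and constants are redeclared locally (trimmed) so the snippet compiles on its own, and the Settings payload is a placeholder map rather than a real settings document.

package main

import (
	"encoding/json"
	"fmt"
)

// Local, trimmed mirrors of the schema1 types above, redeclared so this sketch is standalone.
type RequestType string
type ResourceType string

const (
	Add     RequestType  = "Add"
	Network ResourceType = "Network"
)

type ResourceModificationRequestResponse struct {
	Resource ResourceType `json:"ResourceType"`
	Data     interface{}  `json:"Settings"`
	Request  RequestType  `json:"RequestType,omitempty"`
}

func main() {
	// The Settings payload is opaque at this level; a plain map stands in for it here.
	req := ResourceModificationRequestResponse{
		Resource: Network,
		Request:  Add,
		Data:     map[string]string{"Placeholder": "settings"},
	}
	b, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"ResourceType":"Network","Settings":{"Placeholder":"settings"},"RequestType":"Add"}
}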
b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go @@ -0,0 +1,13 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Battery struct { +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go new file mode 100644 index 00000000000..c1ea3953b58 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CacheQueryStatsResponse struct { + L3OccupancyBytes int32 `json:"L3OccupancyBytes,omitempty"` + + L3TotalBwBytes int32 `json:"L3TotalBwBytes,omitempty"` + + L3LocalBwBytes int32 `json:"L3LocalBwBytes,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go new file mode 100644 index 00000000000..ca75277a3f2 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go @@ -0,0 +1,27 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Chipset struct { + Uefi *Uefi `json:"Uefi,omitempty"` + + IsNumLockDisabled bool `json:"IsNumLockDisabled,omitempty"` + + BaseBoardSerialNumber string `json:"BaseBoardSerialNumber,omitempty"` + + ChassisSerialNumber string `json:"ChassisSerialNumber,omitempty"` + + ChassisAssetTag string `json:"ChassisAssetTag,omitempty"` + + UseUtc bool `json:"UseUtc,omitempty"` + + // LinuxKernelDirect - Added in v2.2 Builds >=181117 + LinuxKernelDirect *LinuxKernelDirect `json:"LinuxKernelDirect,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go new file mode 100644 index 00000000000..b4f9c315b05 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CloseHandle struct { + Handle string `json:"Handle,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go new file mode 100644 index 00000000000..8bf8cab60e5 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API 
version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// ComPort specifies the named pipe that will be used for the port, with empty string indicating a disconnected port. +type ComPort struct { + NamedPipe string `json:"NamedPipe,omitempty"` + + OptimizeForDebugger bool `json:"OptimizeForDebugger,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go new file mode 100644 index 00000000000..10cea67e042 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go @@ -0,0 +1,26 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ComputeSystem struct { + Owner string `json:"Owner,omitempty"` + + SchemaVersion *Version `json:"SchemaVersion,omitempty"` + + HostingSystemId string `json:"HostingSystemId,omitempty"` + + HostedSystem interface{} `json:"HostedSystem,omitempty"` + + Container *Container `json:"Container,omitempty"` + + VirtualMachine *VirtualMachine `json:"VirtualMachine,omitempty"` + + ShouldTerminateOnLastHandleClosed bool `json:"ShouldTerminateOnLastHandleClosed,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go new file mode 100644 index 00000000000..1d5dfe68ad6 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go @@ -0,0 +1,72 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import ( + "net/http" +) + +// contextKeys are used to identify the type of value in the context. +// Since these are string, it is possible to get a short description of the +// context key for logging and debugging using key.String(). + +type contextKey string + +func (c contextKey) String() string { + return "auth " + string(c) +} + +var ( + // ContextOAuth2 takes a oauth2.TokenSource as authentication for the request. + ContextOAuth2 = contextKey("token") + + // ContextBasicAuth takes BasicAuth as authentication for the request. + ContextBasicAuth = contextKey("basic") + + // ContextAccessToken takes a string oauth2 access token as authentication for the request. 
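A minimal sketch of how the context keys above are meant to be consumed: credentials are stashed on a context.Context and read back by the same key. The types are redeclared locally so the snippet stands alone, and the credential values are placeholders.

package main

import (
	"context"
	"fmt"
)

// Local mirrors of the generated auth helpers, redeclared so the sketch compiles on its own.
type contextKey string

type BasicAuth struct {
	UserName string `json:"userName,omitempty"`
	Password string `json:"password,omitempty"`
}

var ContextBasicAuth = contextKey("basic")

func main() {
	// Stash placeholder credentials on the context; a client would look them up by the same key.
	ctx := context.WithValue(context.Background(), ContextBasicAuth, BasicAuth{UserName: "svc-user", Password: "placeholder"})

	if auth, ok := ctx.Value(ContextBasicAuth).(BasicAuth); ok {
		fmt.Println("would authenticate as", auth.UserName)
	}
}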
+ ContextAccessToken = contextKey("accesstoken") + + // ContextAPIKey takes an APIKey as authentication for the request + ContextAPIKey = contextKey("apikey") +) + +// BasicAuth provides basic http authentication to a request passed via context using ContextBasicAuth +type BasicAuth struct { + UserName string `json:"userName,omitempty"` + Password string `json:"password,omitempty"` +} + +// APIKey provides API key based authentication to a request passed via context using ContextAPIKey +type APIKey struct { + Key string + Prefix string +} + +type Configuration struct { + BasePath string `json:"basePath,omitempty"` + Host string `json:"host,omitempty"` + Scheme string `json:"scheme,omitempty"` + DefaultHeader map[string]string `json:"defaultHeader,omitempty"` + UserAgent string `json:"userAgent,omitempty"` + HTTPClient *http.Client +} + +func NewConfiguration() *Configuration { + cfg := &Configuration{ + BasePath: "https://localhost", + DefaultHeader: make(map[string]string), + UserAgent: "Swagger-Codegen/2.1.0/go", + } + return cfg +} + +func (c *Configuration) AddDefaultHeader(key string, value string) { + c.DefaultHeader[key] = value +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go new file mode 100644 index 00000000000..68aa04a573e --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ConsoleSize struct { + Height int32 `json:"Height,omitempty"` + + Width int32 `json:"Width,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go new file mode 100644 index 00000000000..39a54432c02 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go @@ -0,0 +1,36 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Container struct { + GuestOs *GuestOs `json:"GuestOs,omitempty"` + + Storage *Storage `json:"Storage,omitempty"` + + MappedDirectories []MappedDirectory `json:"MappedDirectories,omitempty"` + + MappedPipes []MappedPipe `json:"MappedPipes,omitempty"` + + Memory *Memory `json:"Memory,omitempty"` + + Processor *Processor `json:"Processor,omitempty"` + + Networking *Networking `json:"Networking,omitempty"` + + HvSocket *HvSocket `json:"HvSocket,omitempty"` + + ContainerCredentialGuard *ContainerCredentialGuardState `json:"ContainerCredentialGuard,omitempty"` + + RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"` + + AssignedDevices []Device `json:"AssignedDevices,omitempty"` + + AdditionalDeviceNamespace *ContainerDefinitionDevice `json:"AdditionalDeviceNamespace,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go 
b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go new file mode 100644 index 00000000000..495c6ebc8f4 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardAddInstanceRequest struct { + Id string `json:"Id,omitempty"` + CredentialSpec string `json:"CredentialSpec,omitempty"` + Transport string `json:"Transport,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go new file mode 100644 index 00000000000..1ed4c008f25 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardHvSocketServiceConfig struct { + ServiceId string `json:"ServiceId,omitempty"` + ServiceConfig *HvSocketServiceConfig `json:"ServiceConfig,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go new file mode 100644 index 00000000000..d7ebd0fcca1 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardInstance struct { + Id string `json:"Id,omitempty"` + CredentialGuard *ContainerCredentialGuardState `json:"CredentialGuard,omitempty"` + HvSocketConfig *ContainerCredentialGuardHvSocketServiceConfig `json:"HvSocketConfig,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go new file mode 100644 index 00000000000..71005b090be --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardModifyOperation string + +const ( + AddInstance ContainerCredentialGuardModifyOperation = "AddInstance" + RemoveInstance 
ContainerCredentialGuardModifyOperation = "RemoveInstance" +) diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go new file mode 100644 index 00000000000..952cda4965c --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardOperationRequest struct { + Operation ContainerCredentialGuardModifyOperation `json:"Operation,omitempty"` + OperationDetails interface{} `json:"OperationDetails,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go new file mode 100644 index 00000000000..32e5a3beed1 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardRemoveInstanceRequest struct { + Id string `json:"Id,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go new file mode 100644 index 00000000000..0f8f644379c --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go @@ -0,0 +1,25 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardState struct { + + // Authentication cookie for calls to a Container Credential Guard instance. + Cookie string `json:"Cookie,omitempty"` + + // Name of the RPC endpoint of the Container Credential Guard instance. + RpcEndpoint string `json:"RpcEndpoint,omitempty"` + + // Transport used for the configured Container Credential Guard instance. + Transport string `json:"Transport,omitempty"` + + // Credential spec used for the configured Container Credential Guard instance. 
+ CredentialSpec string `json:"CredentialSpec,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go new file mode 100644 index 00000000000..ea306fa21ac --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardSystemInfo struct { + Instances []ContainerCredentialGuardInstance `json:"Instances,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go new file mode 100644 index 00000000000..1fd7ca5d56f --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go @@ -0,0 +1,25 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// memory usage as viewed from within the container +type ContainerMemoryInformation struct { + TotalPhysicalBytes int32 `json:"TotalPhysicalBytes,omitempty"` + + TotalUsage int32 `json:"TotalUsage,omitempty"` + + CommittedBytes int32 `json:"CommittedBytes,omitempty"` + + SharedCommittedBytes int32 `json:"SharedCommittedBytes,omitempty"` + + CommitLimitBytes int32 `json:"CommitLimitBytes,omitempty"` + + PeakCommitmentBytes int32 `json:"PeakCommitmentBytes,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go new file mode 100644 index 00000000000..90332a5190f --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// CPU groups allow Hyper-V administrators to better manage and allocate the host's CPU resources across guest virtual machines +type CpuGroup struct { + Id string `json:"Id,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go new file mode 100644 index 00000000000..8794961bf5c --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CpuGroupAffinity struct { + LogicalProcessorCount int32 
`json:"LogicalProcessorCount,omitempty"` + LogicalProcessors []int32 `json:"LogicalProcessors,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go new file mode 100644 index 00000000000..0be0475d41a --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CpuGroupConfig struct { + GroupId string `json:"GroupId,omitempty"` + Affinity *CpuGroupAffinity `json:"Affinity,omitempty"` + GroupProperties []CpuGroupProperty `json:"GroupProperties,omitempty"` + // Hypervisor CPU group IDs exposed to clients + HypervisorGroupId uint64 `json:"HypervisorGroupId,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go new file mode 100644 index 00000000000..3ace0ccc3b8 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Structure used to return cpu groups for a Service property query +type CpuGroupConfigurations struct { + CpuGroups []CpuGroupConfig `json:"CpuGroups,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go new file mode 100644 index 00000000000..7d897807016 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CPUGroupOperation string + +const ( + CreateGroup CPUGroupOperation = "CreateGroup" + DeleteGroup CPUGroupOperation = "DeleteGroup" + SetProperty CPUGroupOperation = "SetProperty" +) diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go new file mode 100644 index 00000000000..bbad6a2c450 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CpuGroupProperty struct { + PropertyCode uint32 `json:"PropertyCode,omitempty"` + PropertyValue uint32 `json:"PropertyValue,omitempty"` +} diff --git 
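For illustration, a self-contained sketch of the JSON a caller might build for a CPU group with an explicit affinity. The structs are local, trimmed mirrors of the generated CpuGroupConfig and CpuGroupAffinity above, and the group ID is a placeholder.

package main

import (
	"encoding/json"
	"fmt"
)

// Local, trimmed mirrors of the generated CPU group types.
type CpuGroupAffinity struct {
	LogicalProcessorCount int32   `json:"LogicalProcessorCount,omitempty"`
	LogicalProcessors     []int32 `json:"LogicalProcessors,omitempty"`
}

type CpuGroupConfig struct {
	GroupId  string            `json:"GroupId,omitempty"`
	Affinity *CpuGroupAffinity `json:"Affinity,omitempty"`
}

func main() {
	cfg := CpuGroupConfig{
		GroupId: "00000000-0000-0000-0000-000000000001", // placeholder group ID
		Affinity: &CpuGroupAffinity{
			LogicalProcessorCount: 2,
			LogicalProcessors:     []int32{0, 1},
		},
	}
	b, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}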
a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go new file mode 100644 index 00000000000..91a8278fe3c --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Create group operation settings +type CreateGroupOperation struct { + GroupId string `json:"GroupId,omitempty"` + LogicalProcessorCount uint32 `json:"LogicalProcessorCount,omitempty"` + LogicalProcessors []uint32 `json:"LogicalProcessors,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go new file mode 100644 index 00000000000..134bd988175 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Delete group operation settings +type DeleteGroupOperation struct { + GroupId string `json:"GroupId,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go new file mode 100644 index 00000000000..31c4538affc --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go @@ -0,0 +1,27 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceType string + +const ( + ClassGUID DeviceType = "ClassGuid" + DeviceInstanceID DeviceType = "DeviceInstance" + GPUMirror DeviceType = "GpuMirror" +) + +type Device struct { + // The type of device to assign to the container. + Type DeviceType `json:"Type,omitempty"` + // The interface class guid of the device interfaces to assign to the container. Only used when Type is ClassGuid. + InterfaceClassGuid string `json:"InterfaceClassGuid,omitempty"` + // The location path of the device to assign to the container. Only used when Type is DeviceInstanceID. 
+ LocationPath string `json:"LocationPath,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go new file mode 100644 index 00000000000..e985d96d228 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go @@ -0,0 +1,46 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Devices struct { + ComPorts map[string]ComPort `json:"ComPorts,omitempty"` + + Scsi map[string]Scsi `json:"Scsi,omitempty"` + + VirtualPMem *VirtualPMemController `json:"VirtualPMem,omitempty"` + + NetworkAdapters map[string]NetworkAdapter `json:"NetworkAdapters,omitempty"` + + VideoMonitor *VideoMonitor `json:"VideoMonitor,omitempty"` + + Keyboard *Keyboard `json:"Keyboard,omitempty"` + + Mouse *Mouse `json:"Mouse,omitempty"` + + HvSocket *HvSocket2 `json:"HvSocket,omitempty"` + + EnhancedModeVideo *EnhancedModeVideo `json:"EnhancedModeVideo,omitempty"` + + GuestCrashReporting *GuestCrashReporting `json:"GuestCrashReporting,omitempty"` + + VirtualSmb *VirtualSmb `json:"VirtualSmb,omitempty"` + + Plan9 *Plan9 `json:"Plan9,omitempty"` + + Battery *Battery `json:"Battery,omitempty"` + + FlexibleIov map[string]FlexibleIoDevice `json:"FlexibleIov,omitempty"` + + SharedMemory *SharedMemoryConfiguration `json:"SharedMemory,omitempty"` + + // TODO: This is pre-release support in schema 2.3. Need to add build number + // docs when a public build with this is out. + VirtualPci map[string]VirtualPciDevice `json:",omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go new file mode 100644 index 00000000000..85450c41e10 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type EnhancedModeVideo struct { + ConnectionOptions *RdpConnectionOptions `json:"ConnectionOptions,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go new file mode 100644 index 00000000000..fe86cab6556 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type FlexibleIoDevice struct { + EmulatorId string `json:"EmulatorId,omitempty"` + + HostingModel string `json:"HostingModel,omitempty"` + + Configuration []string `json:"Configuration,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go 
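A short, self-contained sketch of a Device assignment by interface class, mirroring the Device and DeviceType definitions above in trimmed local form; the GUID is a placeholder, not a real device interface class.

package main

import (
	"encoding/json"
	"fmt"
)

// Local, trimmed mirrors of the generated Device type and its DeviceType constants.
type DeviceType string

const (
	ClassGUID        DeviceType = "ClassGuid"
	DeviceInstanceID DeviceType = "DeviceInstance"
)

type Device struct {
	Type               DeviceType `json:"Type,omitempty"`
	InterfaceClassGuid string     `json:"InterfaceClassGuid,omitempty"`
	LocationPath       string     `json:"LocationPath,omitempty"`
}

func main() {
	// Assign by interface class; the GUID below is only a placeholder.
	d := Device{
		Type:               ClassGUID,
		InterfaceClassGuid: "00000000-0000-0000-0000-000000000000",
	}
	b, err := json.Marshal(d)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"Type":"ClassGuid","InterfaceClassGuid":"00000000-0000-0000-0000-000000000000"}
}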
b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go new file mode 100644 index 00000000000..7db29495b3e --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type GuestConnection struct { + + // Use Vsock rather than Hyper-V sockets to communicate with the guest service. + UseVsock bool `json:"UseVsock,omitempty"` + + // Don't disconnect the guest connection when pausing the virtual machine. + UseConnectedSuspend bool `json:"UseConnectedSuspend,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go new file mode 100644 index 00000000000..8a369bab71c --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Information about the guest. +type GuestConnectionInfo struct { + + // Each schema version x.y stands for the range of versions a.b where a==x and b<=y. This list comes from the SupportedSchemaVersions field in GcsCapabilities. + SupportedSchemaVersions []Version `json:"SupportedSchemaVersions,omitempty"` + + ProtocolVersion int32 `json:"ProtocolVersion,omitempty"` + + GuestDefinedCapabilities *interface{} `json:"GuestDefinedCapabilities,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go new file mode 100644 index 00000000000..af828004835 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type GuestCrashReporting struct { + WindowsCrashSettings *WindowsCrashReporting `json:"WindowsCrashSettings,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go new file mode 100644 index 00000000000..8838519a39c --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type GuestOs struct { + HostName string `json:"HostName,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go 
b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go new file mode 100644 index 00000000000..ef1eec88656 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type GuestState struct { + + // The path to an existing file used for persistent guest state storage. An empty string indicates the system should initialize new transient, in-memory guest state. + GuestStateFilePath string `json:"GuestStateFilePath,omitempty"` + + // The path to an existing file for persistent runtime state storage. An empty string indicates the system should initialize new transient, in-memory runtime state. + RuntimeStateFilePath string `json:"RuntimeStateFilePath,omitempty"` + + // If true, the guest state and runtime state files will be used as templates to populate transient, in-memory state instead of using the files as persistent backing store. + ForceTransientState bool `json:"ForceTransientState,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go new file mode 100644 index 00000000000..2238ce5306c --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Structure used to request a service processor modification +type HostProcessorModificationRequest struct { + Operation CPUGroupOperation `json:"Operation,omitempty"` + OperationDetails interface{} `json:"OperationDetails,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go new file mode 100644 index 00000000000..ea3084bca7f --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type HostedSystem struct { + SchemaVersion *Version `json:"SchemaVersion,omitempty"` + + Container *Container `json:"Container,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go new file mode 100644 index 00000000000..23b2ee9e7d4 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema +
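Ahead of the HvSocket types that follow, a hedged sketch of what a ServiceTable entry looks like on the wire. HvSocketServiceConfig and HvSocketSystemConfig are mirrored locally in trimmed form, and both the service ID and the SDDL string are placeholders for illustration only.

package main

import (
	"encoding/json"
	"fmt"
)

// Local, trimmed mirrors of the HvSocket config types defined further down in this change.
type HvSocketServiceConfig struct {
	BindSecurityDescriptor string `json:"BindSecurityDescriptor,omitempty"`
	AllowWildcardBinds     bool   `json:"AllowWildcardBinds,omitempty"`
}

type HvSocketSystemConfig struct {
	ServiceTable map[string]HvSocketServiceConfig `json:"ServiceTable,omitempty"`
}

func main() {
	// Placeholder service ID and SDDL string, shown only to illustrate the table shape.
	cfg := HvSocketSystemConfig{
		ServiceTable: map[string]HvSocketServiceConfig{
			"00000000-0000-0000-0000-000000000000": {
				BindSecurityDescriptor: "D:P(A;;FA;;;SY)",
				AllowWildcardBinds:     true,
			},
		},
	}
	b, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}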
+type HvSocket struct { + Config *HvSocketSystemConfig `json:"Config,omitempty"` + + EnablePowerShellDirect bool `json:"EnablePowerShellDirect,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go new file mode 100644 index 00000000000..a017691f02d --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// HvSocket configuration for a VM +type HvSocket2 struct { + HvSocketConfig *HvSocketSystemConfig `json:"HvSocketConfig,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go new file mode 100644 index 00000000000..84c11b93ee5 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// This class defines address settings applied to a VM +// by the GCS every time a VM starts or restores. +type HvSocketAddress struct { + LocalAddress string `json:"LocalAddress,omitempty"` + ParentAddress string `json:"ParentAddress,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go new file mode 100644 index 00000000000..ecd9f7fbac2 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go @@ -0,0 +1,28 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type HvSocketServiceConfig struct { + + // SDDL string that HvSocket will check before allowing a host process to bind to this specific service. If not specified, defaults to the system DefaultBindSecurityDescriptor, defined in HvSocketSystemWpConfig in V1. + BindSecurityDescriptor string `json:"BindSecurityDescriptor,omitempty"` + + // SDDL string that HvSocket will check before allowing a host process to connect to this specific service. If not specified, defaults to the system DefaultConnectSecurityDescriptor, defined in HvSocketSystemWpConfig in V1. + ConnectSecurityDescriptor string `json:"ConnectSecurityDescriptor,omitempty"` + + // If true, HvSocket will process wildcard binds for this service/system combination. Wildcard binds are secured in the registry at SOFTWARE/Microsoft/Windows NT/CurrentVersion/Virtualization/HvSocket/WildcardDescriptors + AllowWildcardBinds bool `json:"AllowWildcardBinds,omitempty"` + + // Disabled controls whether the HvSocket service is accepting connection requests. 
+ // This set to true will make the service refuse all incoming connections as well as cancel + // any connections already established. The service itself will still be active however + // and can be re-enabled at a future time. + Disabled bool `json:"Disabled,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go new file mode 100644 index 00000000000..69f4f9d39b9 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// This is the HCS Schema version of the HvSocket configuration. The VMWP version is located in Config.Devices.IC in V1. +type HvSocketSystemConfig struct { + + // SDDL string that HvSocket will check before allowing a host process to bind to an unlisted service for this specific container/VM (not wildcard binds). + DefaultBindSecurityDescriptor string `json:"DefaultBindSecurityDescriptor,omitempty"` + + // SDDL string that HvSocket will check before allowing a host process to connect to an unlisted service in the VM/container. + DefaultConnectSecurityDescriptor string `json:"DefaultConnectSecurityDescriptor,omitempty"` + + ServiceTable map[string]HvSocketServiceConfig `json:"ServiceTable,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go new file mode 100644 index 00000000000..a614d63bd72 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go @@ -0,0 +1,42 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type InterruptModerationName string + +// The valid interrupt moderation modes for I/O virtualization (IOV) offloading. 
+const ( + DefaultName InterruptModerationName = "Default" + AdaptiveName InterruptModerationName = "Adaptive" + OffName InterruptModerationName = "Off" + LowName InterruptModerationName = "Low" + MediumName InterruptModerationName = "Medium" + HighName InterruptModerationName = "High" +) + +type InterruptModerationValue uint32 + +const ( + DefaultValue InterruptModerationValue = iota + AdaptiveValue + OffValue + LowValue InterruptModerationValue = 100 + MediumValue InterruptModerationValue = 200 + HighValue InterruptModerationValue = 300 +) + +var InterruptModerationValueToName = map[InterruptModerationValue]InterruptModerationName{ + DefaultValue: DefaultName, + AdaptiveValue: AdaptiveName, + OffValue: OffName, + LowValue: LowName, + MediumValue: MediumName, + HighValue: HighName, +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go new file mode 100644 index 00000000000..2a55cc37cd3 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type IovSettings struct { + // The weight assigned to this port for I/O virtualization (IOV) offloading. + // Setting this to 0 disables IOV offloading. + OffloadWeight *uint32 `json:"OffloadWeight,omitempty"` + + // The number of queue pairs requested for this port for I/O virtualization (IOV) offloading. + QueuePairsRequested *uint32 `json:"QueuePairsRequested,omitempty"` + + // The interrupt moderation mode for I/O virtualization (IOV) offloading. 
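A small standalone sketch showing the intended use of the value-to-name mapping above, for example when translating a numeric setting into the string form the schema expects; the constants and map are mirrored locally so the snippet compiles on its own.

package main

import "fmt"

// Local mirrors of the generated interrupt-moderation names, values, and lookup map.
type InterruptModerationName string

type InterruptModerationValue uint32

const (
	DefaultValue InterruptModerationValue = iota
	AdaptiveValue
	OffValue
	LowValue    InterruptModerationValue = 100
	MediumValue InterruptModerationValue = 200
	HighValue   InterruptModerationValue = 300
)

var InterruptModerationValueToName = map[InterruptModerationValue]InterruptModerationName{
	DefaultValue:  "Default",
	AdaptiveValue: "Adaptive",
	OffValue:      "Off",
	LowValue:      "Low",
	MediumValue:   "Medium",
	HighValue:     "High",
}

func main() {
	// Translate a numeric setting into the schema's string form.
	fmt.Println(InterruptModerationValueToName[MediumValue]) // Medium
}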
+ InterruptModeration *InterruptModerationName `json:"InterruptModeration,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go new file mode 100644 index 00000000000..3d3fa3b1c73 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go @@ -0,0 +1,13 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Keyboard struct { +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go new file mode 100644 index 00000000000..176c49d4959 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Layer struct { + Id string `json:"Id,omitempty"` + + Path string `json:"Path,omitempty"` + + PathType string `json:"PathType,omitempty"` + + // Unspecified defaults to Enabled + Cache string `json:"Cache,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go new file mode 100644 index 00000000000..0ab6c280fc8 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.2 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type LinuxKernelDirect struct { + KernelFilePath string `json:"KernelFilePath,omitempty"` + + InitRdPath string `json:"InitRdPath,omitempty"` + + KernelCmdLine string `json:"KernelCmdLine,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go new file mode 100644 index 00000000000..2e3aa5e1750 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type LogicalProcessor struct { + LpIndex uint32 `json:"LpIndex,omitempty"` + NodeNumber uint8 `json:"NodeNumber,omitempty"` + PackageId uint32 `json:"PackageId,omitempty"` + CoreId uint32 `json:"CoreId,omitempty"` + RootVpIndex int32 `json:"RootVpIndex,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go new file mode 100644 index 00000000000..9b86a40457f --- /dev/null +++ 
b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type MappedDirectory struct { + HostPath string `json:"HostPath,omitempty"` + + HostPathType string `json:"HostPathType,omitempty"` + + ContainerPath string `json:"ContainerPath,omitempty"` + + ReadOnly bool `json:"ReadOnly,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go new file mode 100644 index 00000000000..208074e9a25 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type MappedPipe struct { + ContainerPipeName string `json:"ContainerPipeName,omitempty"` + + HostPath string `json:"HostPath,omitempty"` + + HostPathType string `json:"HostPathType,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go new file mode 100644 index 00000000000..30749c67249 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Memory struct { + SizeInMB uint64 `json:"SizeInMB,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go new file mode 100644 index 00000000000..71224c75b9d --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go @@ -0,0 +1,49 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Memory2 struct { + SizeInMB uint64 `json:"SizeInMB,omitempty"` + + AllowOvercommit bool `json:"AllowOvercommit,omitempty"` + + EnableHotHint bool `json:"EnableHotHint,omitempty"` + + EnableColdHint bool `json:"EnableColdHint,omitempty"` + + EnableEpf bool `json:"EnableEpf,omitempty"` + + // EnableDeferredCommit is private in the schema. If regenerated need to add back. + EnableDeferredCommit bool `json:"EnableDeferredCommit,omitempty"` + + // EnableColdDiscardHint if enabled, then the memory cold discard hint feature is exposed + // to the VM, allowing it to trim non-zeroed pages from the working set (if supported by + // the guest operating system). + EnableColdDiscardHint bool `json:"EnableColdDiscardHint,omitempty"` + + // LowMmioGapInMB is the low MMIO region allocated below 4GB. 
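A minimal sketch of a read-only MappedDirectory as it would be serialized, using a trimmed local mirror of the type above; the host and container paths are placeholders.

package main

import (
	"encoding/json"
	"fmt"
)

// Local, trimmed mirror of the generated MappedDirectory type.
type MappedDirectory struct {
	HostPath      string `json:"HostPath,omitempty"`
	ContainerPath string `json:"ContainerPath,omitempty"`
	ReadOnly      bool   `json:"ReadOnly,omitempty"`
}

func main() {
	// Placeholder paths: expose a host folder read-only inside the container.
	md := MappedDirectory{
		HostPath:      `C:\host\config`,
		ContainerPath: `C:\config`,
		ReadOnly:      true,
	}
	b, err := json.Marshal(md)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"HostPath":"C:\\host\\config","ContainerPath":"C:\\config","ReadOnly":true}
}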
+ // + // TODO: This is pre-release support in schema 2.3. Need to add build number + // docs when a public build with this is out. + LowMMIOGapInMB uint64 `json:"LowMmioGapInMB,omitempty"` + + // HighMmioBaseInMB is the high MMIO region allocated above 4GB (base and + // size). + // + // TODO: This is pre-release support in schema 2.3. Need to add build number + // docs when a public build with this is out. + HighMMIOBaseInMB uint64 `json:"HighMmioBaseInMB,omitempty"` + + // HighMmioGapInMB is the high MMIO region. + // + // TODO: This is pre-release support in schema 2.3. Need to add build number + // docs when a public build with this is out. + HighMMIOGapInMB uint64 `json:"HighMmioGapInMB,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go new file mode 100644 index 00000000000..811779b04b2 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type MemoryInformationForVm struct { + VirtualNodeCount uint32 `json:"VirtualNodeCount,omitempty"` + + VirtualMachineMemory *VmMemory `json:"VirtualMachineMemory,omitempty"` + + VirtualNodes []VirtualNodeInfo `json:"VirtualNodes,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go new file mode 100644 index 00000000000..906ba597f9f --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Memory runtime statistics +type MemoryStats struct { + MemoryUsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"` + + MemoryUsageCommitPeakBytes uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"` + + MemoryUsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go new file mode 100644 index 00000000000..8dbe40b3be2 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerDefinitionDevice struct { + DeviceExtension []DeviceExtension `json:"device_extension,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go 
new file mode 100644 index 00000000000..8fe89f92747 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceCategory struct { + Name string `json:"name,omitempty"` + InterfaceClass []InterfaceClass `json:"interface_class,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go new file mode 100644 index 00000000000..a62568d892e --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceExtension struct { + DeviceCategory *DeviceCategory `json:"device_category,omitempty"` + Namespace *DeviceExtensionNamespace `json:"namespace,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go new file mode 100644 index 00000000000..a7410febd6d --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceInstance struct { + Id string `json:"id,omitempty"` + LocationPath string `json:"location_path,omitempty"` + PortName string `json:"port_name,omitempty"` + InterfaceClass []InterfaceClass `json:"interface_class,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go new file mode 100644 index 00000000000..3553640647e --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceNamespace struct { + RequiresDriverstore bool `json:"requires_driverstore,omitempty"` + DeviceCategory []DeviceCategory `json:"device_category,omitempty"` + DeviceInstance []DeviceInstance `json:"device_instance,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go new file mode 100644 index 00000000000..7be98b54107 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go @@ -0,0 +1,16 
@@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type InterfaceClass struct { + Type_ string `json:"type,omitempty"` + Identifier string `json:"identifier,omitempty"` + Recurse bool `json:"recurse,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go new file mode 100644 index 00000000000..3ab9cf1ecf0 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceExtensionNamespace struct { + Ob *ObjectNamespace `json:"ob,omitempty"` + Device *DeviceNamespace `json:"device,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go new file mode 100644 index 00000000000..d2f51b3b53c --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ObjectDirectory struct { + Name string `json:"name,omitempty"` + Clonesd string `json:"clonesd,omitempty"` + Shadow string `json:"shadow,omitempty"` + Symlink []ObjectSymlink `json:"symlink,omitempty"` + Objdir []ObjectDirectory `json:"objdir,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go new file mode 100644 index 00000000000..47dfb55bfa8 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ObjectNamespace struct { + Shadow string `json:"shadow,omitempty"` + Symlink []ObjectSymlink `json:"symlink,omitempty"` + Objdir []ObjectDirectory `json:"objdir,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go new file mode 100644 index 00000000000..8867ebe5f02 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen 
(https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ObjectSymlink struct { + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + Scope string `json:"scope,omitempty"` + Pathtoclone string `json:"pathtoclone,omitempty"` + AccessMask int32 `json:"access_mask,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go new file mode 100644 index 00000000000..1384ed88821 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ModificationRequest struct { + PropertyType PropertyType `json:"PropertyType,omitempty"` + Settings interface{} `json:"Settings,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go new file mode 100644 index 00000000000..d29455a3e43 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ModifySettingRequest struct { + ResourcePath string `json:"ResourcePath,omitempty"` + + RequestType string `json:"RequestType,omitempty"` + + Settings interface{} `json:"Settings,omitempty"` // NOTE: Swagger generated as *interface{}. Locally updated + + GuestRequest interface{} `json:"GuestRequest,omitempty"` // NOTE: Swagger generated as *interface{}. 
Locally updated +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go new file mode 100644 index 00000000000..ccf8b938f3a --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go @@ -0,0 +1,13 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Mouse struct { +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go new file mode 100644 index 00000000000..7408abd317d --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type NetworkAdapter struct { + EndpointId string `json:"EndpointId,omitempty"` + MacAddress string `json:"MacAddress,omitempty"` + // The I/O virtualization (IOV) offloading configuration. + IovSettings *IovSettings `json:"IovSettings,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go new file mode 100644 index 00000000000..e5ea187a295 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go @@ -0,0 +1,23 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Networking struct { + AllowUnqualifiedDnsQuery bool `json:"AllowUnqualifiedDnsQuery,omitempty"` + + DnsSearchList string `json:"DnsSearchList,omitempty"` + + NetworkSharedContainerName string `json:"NetworkSharedContainerName,omitempty"` + + // Guid in windows; string in linux + Namespace string `json:"Namespace,omitempty"` + + NetworkAdapters []string `json:"NetworkAdapters,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go new file mode 100644 index 00000000000..d96c9501f33 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Notification data that is indicated to components running in the Virtual Machine. 
+type PauseNotification struct { + Reason string `json:"Reason,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go new file mode 100644 index 00000000000..21707a88eb7 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Options for HcsPauseComputeSystem +type PauseOptions struct { + SuspensionLevel string `json:"SuspensionLevel,omitempty"` + + HostedNotification *PauseNotification `json:"HostedNotification,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go new file mode 100644 index 00000000000..29d8c8012ff --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Plan9 struct { + Shares []Plan9Share `json:"Shares,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go new file mode 100644 index 00000000000..41f8fdea029 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go @@ -0,0 +1,34 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Plan9Share struct { + Name string `json:"Name,omitempty"` + + // The name by which the guest operating system can access this share, via the aname parameter in the Plan9 protocol. + AccessName string `json:"AccessName,omitempty"` + + Path string `json:"Path,omitempty"` + + Port int32 `json:"Port,omitempty"` + + // Flags are marked private.
Until they are exported correctly + // + // ReadOnly 0x00000001 + // LinuxMetadata 0x00000004 + // CaseSensitive 0x00000008 + Flags int32 `json:"Flags,omitempty"` + + ReadOnly bool `json:"ReadOnly,omitempty"` + + UseShareRootIdentity bool `json:"UseShareRootIdentity,omitempty"` + + AllowedFiles []string `json:"AllowedFiles,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go new file mode 100644 index 00000000000..e9a662dd59d --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go @@ -0,0 +1,33 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import ( + "time" +) + +// Information about a process running in a container +type ProcessDetails struct { + ProcessId int32 `json:"ProcessId,omitempty"` + + ImageName string `json:"ImageName,omitempty"` + + CreateTimestamp time.Time `json:"CreateTimestamp,omitempty"` + + UserTime100ns int32 `json:"UserTime100ns,omitempty"` + + KernelTime100ns int32 `json:"KernelTime100ns,omitempty"` + + MemoryCommitBytes int32 `json:"MemoryCommitBytes,omitempty"` + + MemoryWorkingSetPrivateBytes int32 `json:"MemoryWorkingSetPrivateBytes,omitempty"` + + MemoryWorkingSetSharedBytes int32 `json:"MemoryWorkingSetSharedBytes,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go new file mode 100644 index 00000000000..e4ed095c7be --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Passed to HcsRpc_ModifyProcess +type ProcessModifyRequest struct { + Operation string `json:"Operation,omitempty"` + + ConsoleSize *ConsoleSize `json:"ConsoleSize,omitempty"` + + CloseHandle *CloseHandle `json:"CloseHandle,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go new file mode 100644 index 00000000000..82b0d0532b2 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go @@ -0,0 +1,46 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ProcessParameters struct { + ApplicationName string `json:"ApplicationName,omitempty"` + + CommandLine string `json:"CommandLine,omitempty"` + + // optional alternative to CommandLine, currently only supported by Linux GCS + CommandArgs []string `json:"CommandArgs,omitempty"` + + User string `json:"User,omitempty"` + + WorkingDirectory string `json:"WorkingDirectory,omitempty"` + + Environment map[string]string 
`json:"Environment,omitempty"` + + // if set, will run as low-privilege process + RestrictedToken bool `json:"RestrictedToken,omitempty"` + + // if set, ignore StdErrPipe + EmulateConsole bool `json:"EmulateConsole,omitempty"` + + CreateStdInPipe bool `json:"CreateStdInPipe,omitempty"` + + CreateStdOutPipe bool `json:"CreateStdOutPipe,omitempty"` + + CreateStdErrPipe bool `json:"CreateStdErrPipe,omitempty"` + + // height then width + ConsoleSize []int32 `json:"ConsoleSize,omitempty"` + + // if set, find an existing session for the user and create the process in it + UseExistingLogin bool `json:"UseExistingLogin,omitempty"` + + // if set, use the legacy console instead of conhost + UseLegacyConsole bool `json:"UseLegacyConsole,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go new file mode 100644 index 00000000000..ad9a4fa9ad6 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Status of a process running in a container +type ProcessStatus struct { + ProcessId int32 `json:"ProcessId,omitempty"` + + Exited bool `json:"Exited,omitempty"` + + ExitCode int32 `json:"ExitCode,omitempty"` + + LastWaitResult int32 `json:"LastWaitResult,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go new file mode 100644 index 00000000000..bb24e88da1a --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Processor struct { + Count int32 `json:"Count,omitempty"` + + Maximum int32 `json:"Maximum,omitempty"` + + Weight int32 `json:"Weight,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go new file mode 100644 index 00000000000..c64f335ec7d --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go @@ -0,0 +1,23 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.5 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Processor2 struct { + Count int32 `json:"Count,omitempty"` + + Limit int32 `json:"Limit,omitempty"` + + Weight int32 `json:"Weight,omitempty"` + + ExposeVirtualizationExtensions bool `json:"ExposeVirtualizationExtensions,omitempty"` + + // An optional object that configures the CPU Group to which a Virtual Machine is going to bind to. 
+ CpuGroup *CpuGroup `json:"CpuGroup,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go new file mode 100644 index 00000000000..6157e252256 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// CPU runtime statistics +type ProcessorStats struct { + TotalRuntime100ns uint64 `json:"TotalRuntime100ns,omitempty"` + + RuntimeUser100ns uint64 `json:"RuntimeUser100ns,omitempty"` + + RuntimeKernel100ns uint64 `json:"RuntimeKernel100ns,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go new file mode 100644 index 00000000000..885156e77fa --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ProcessorTopology struct { + LogicalProcessorCount uint32 `json:"LogicalProcessorCount,omitempty"` + LogicalProcessors []LogicalProcessor `json:"LogicalProcessors,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go new file mode 100644 index 00000000000..17558cba0f2 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go @@ -0,0 +1,54 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import ( + v1 "github.com/containerd/cgroups/stats/v1" +) + +type Properties struct { + Id string `json:"Id,omitempty"` + + SystemType string `json:"SystemType,omitempty"` + + RuntimeOsType string `json:"RuntimeOsType,omitempty"` + + Name string `json:"Name,omitempty"` + + Owner string `json:"Owner,omitempty"` + + RuntimeId string `json:"RuntimeId,omitempty"` + + RuntimeTemplateId string `json:"RuntimeTemplateId,omitempty"` + + State string `json:"State,omitempty"` + + Stopped bool `json:"Stopped,omitempty"` + + ExitType string `json:"ExitType,omitempty"` + + Memory *MemoryInformationForVm `json:"Memory,omitempty"` + + Statistics *Statistics `json:"Statistics,omitempty"` + + ProcessList []ProcessDetails `json:"ProcessList,omitempty"` + + TerminateOnLastHandleClosed bool `json:"TerminateOnLastHandleClosed,omitempty"` + + HostingSystemId string `json:"HostingSystemId,omitempty"` + + SharedMemoryRegionInfo []SharedMemoryRegionInfo `json:"SharedMemoryRegionInfo,omitempty"` + + GuestConnectionInfo *GuestConnectionInfo `json:"GuestConnectionInfo,omitempty"` + + // Metrics is not part of the API for HCS but this is used for LCOW v2 to + // return the full cgroup metrics 
from the guest. + Metrics *v1.Metrics `json:"LCOWMetrics,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go new file mode 100644 index 00000000000..d6d80df1314 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// By default the basic properties will be returned. This query provides a way to request specific properties. +type PropertyQuery struct { + PropertyTypes []PropertyType `json:"PropertyTypes,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go new file mode 100644 index 00000000000..98f2c96edbd --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go @@ -0,0 +1,26 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type PropertyType string + +const ( + PTMemory PropertyType = "Memory" + PTGuestMemory PropertyType = "GuestMemory" + PTStatistics PropertyType = "Statistics" + PTProcessList PropertyType = "ProcessList" + PTTerminateOnLastHandleClosed PropertyType = "TerminateOnLastHandleClosed" + PTSharedMemoryRegion PropertyType = "SharedMemoryRegion" + PTContainerCredentialGuard PropertyType = "ContainerCredentialGuard" // This field is not generated by swagger. This was added manually. 
+ PTGuestConnection PropertyType = "GuestConnection" + PTICHeartbeatStatus PropertyType = "ICHeartbeatStatus" + PTProcessorTopology PropertyType = "ProcessorTopology" + PTCPUGroup PropertyType = "CpuGroup" +) diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go new file mode 100644 index 00000000000..8d5f5c1719e --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RdpConnectionOptions struct { + AccessSids []string `json:"AccessSids,omitempty"` + + NamedPipe string `json:"NamedPipe,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go new file mode 100644 index 00000000000..006906f6e2f --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RegistryChanges struct { + AddValues []RegistryValue `json:"AddValues,omitempty"` + + DeleteKeys []RegistryKey `json:"DeleteKeys,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go new file mode 100644 index 00000000000..26fde99c74c --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RegistryKey struct { + Hive string `json:"Hive,omitempty"` + + Name string `json:"Name,omitempty"` + + Volatile bool `json:"Volatile,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go new file mode 100644 index 00000000000..3f203176c32 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go @@ -0,0 +1,30 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RegistryValue struct { + Key *RegistryKey `json:"Key,omitempty"` + + Name string `json:"Name,omitempty"` + + Type_ string `json:"Type,omitempty"` + + // One and only one value type must be set. 
+ StringValue string `json:"StringValue,omitempty"` + + BinaryValue string `json:"BinaryValue,omitempty"` + + DWordValue int32 `json:"DWordValue,omitempty"` + + QWordValue int32 `json:"QWordValue,omitempty"` + + // Only used if RegistryValueType is CustomType. The data is in BinaryValue + CustomType int32 `json:"CustomType,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go new file mode 100644 index 00000000000..778ff58735a --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RestoreState struct { + + // The path to the save state file to restore the system from. + SaveStateFilePath string `json:"SaveStateFilePath,omitempty"` + + // The ID of the template system to clone this new system off of. An empty string indicates the system should not be cloned from a template. + TemplateSystemId string `json:"TemplateSystemId,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go new file mode 100644 index 00000000000..e55fa1d98a5 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SaveOptions struct { + + // The type of save operation to be performed. + SaveType string `json:"SaveType,omitempty"` + + // The path to the file that will contain the saved state. + SaveStateFilePath string `json:"SaveStateFilePath,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go new file mode 100644 index 00000000000..bf253a470b6 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Scsi struct { + + // Map of attachments, where the key is the integer LUN number on the controller.
+ Attachments map[string]Attachment `json:"Attachments,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go new file mode 100644 index 00000000000..b8142ca6a61 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import "encoding/json" + +type ServiceProperties struct { + // Changed Properties field to []json.RawMessage from []interface{} to avoid having to + // remarshal sp.Properties[n] and unmarshal into the type(s) we want. + Properties []json.RawMessage `json:"Properties,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go new file mode 100644 index 00000000000..df9baa9219a --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SharedMemoryConfiguration struct { + Regions []SharedMemoryRegion `json:"Regions,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go new file mode 100644 index 00000000000..825b71865d7 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SharedMemoryRegion struct { + SectionName string `json:"SectionName,omitempty"` + + StartOffset int32 `json:"StartOffset,omitempty"` + + Length int32 `json:"Length,omitempty"` + + AllowGuestWrite bool `json:"AllowGuestWrite,omitempty"` + + HiddenFromGuest bool `json:"HiddenFromGuest,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go new file mode 100644 index 00000000000..f67b08eb57a --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SharedMemoryRegionInfo struct { + SectionName string `json:"SectionName,omitempty"` + + GuestPhysicalAddress int32 `json:"GuestPhysicalAddress,omitempty"` +} diff --git 
a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go new file mode 100644 index 00000000000..5eaf6a7f4a2 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Silo job information +type SiloProperties struct { + Enabled bool `json:"Enabled,omitempty"` + + JobName string `json:"JobName,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go new file mode 100644 index 00000000000..ba7a6b3963b --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go @@ -0,0 +1,29 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import ( + "time" +) + +// Runtime statistics for a container +type Statistics struct { + Timestamp time.Time `json:"Timestamp,omitempty"` + + ContainerStartTime time.Time `json:"ContainerStartTime,omitempty"` + + Uptime100ns uint64 `json:"Uptime100ns,omitempty"` + + Processor *ProcessorStats `json:"Processor,omitempty"` + + Memory *MemoryStats `json:"Memory,omitempty"` + + Storage *StorageStats `json:"Storage,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go new file mode 100644 index 00000000000..2627af91323 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Storage struct { + + // List of layers that describe the parent hierarchy for a container's storage. These layers combined together, presented as a disposable and/or committable working storage, are used by the container to record all changes done to the parent layers. + Layers []Layer `json:"Layers,omitempty"` + + // Path that points to the scratch space of a container, where parent layers are combined together to present a new disposable and/or committable layer with the changes done during its runtime. 
+ Path string `json:"Path,omitempty"` + + QoS *StorageQoS `json:"QoS,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go new file mode 100644 index 00000000000..9c5e6eb5323 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type StorageQoS struct { + IopsMaximum int32 `json:"IopsMaximum,omitempty"` + + BandwidthMaximum int32 `json:"BandwidthMaximum,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go new file mode 100644 index 00000000000..4f042ffd937 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Storage runtime statistics +type StorageStats struct { + ReadCountNormalized uint64 `json:"ReadCountNormalized,omitempty"` + + ReadSizeBytes uint64 `json:"ReadSizeBytes,omitempty"` + + WriteCountNormalized uint64 `json:"WriteCountNormalized,omitempty"` + + WriteSizeBytes uint64 `json:"WriteSizeBytes,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go new file mode 100644 index 00000000000..83486994036 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Topology struct { + Memory *Memory2 `json:"Memory,omitempty"` + + Processor *Processor2 `json:"Processor,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go new file mode 100644 index 00000000000..0e48ece500c --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Uefi struct { + EnableDebugger bool `json:"EnableDebugger,omitempty"` + + SecureBootTemplateId string `json:"SecureBootTemplateId,omitempty"` + + BootThis *UefiBootEntry `json:"BootThis,omitempty"` + + Console string `json:"Console,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go 
b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go new file mode 100644 index 00000000000..3ab409d825e --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type UefiBootEntry struct { + DeviceType string `json:"DeviceType,omitempty"` + + DevicePath string `json:"DevicePath,omitempty"` + + DiskNumber int32 `json:"DiskNumber,omitempty"` + + OptionalData string `json:"OptionalData,omitempty"` + + VmbFsRootPath string `json:"VmbFsRootPath,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go new file mode 100644 index 00000000000..2abfccca315 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Version struct { + Major int32 `json:"Major,omitempty"` + + Minor int32 `json:"Minor,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go new file mode 100644 index 00000000000..ec5d0fb936d --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VideoMonitor struct { + HorizontalResolution int32 `json:"HorizontalResolution,omitempty"` + + VerticalResolution int32 `json:"VerticalResolution,omitempty"` + + ConnectionOptions *RdpConnectionOptions `json:"ConnectionOptions,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go new file mode 100644 index 00000000000..2d22b1bcb08 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go @@ -0,0 +1,32 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualMachine struct { + + // StopOnReset is private in the schema. If regenerated need to put back. 
+ StopOnReset bool `json:"StopOnReset,omitempty"` + + Chipset *Chipset `json:"Chipset,omitempty"` + + ComputeTopology *Topology `json:"ComputeTopology,omitempty"` + + Devices *Devices `json:"Devices,omitempty"` + + GuestState *GuestState `json:"GuestState,omitempty"` + + RestoreState *RestoreState `json:"RestoreState,omitempty"` + + RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"` + + StorageQoS *StorageQoS `json:"StorageQoS,omitempty"` + + GuestConnection *GuestConnection `json:"GuestConnection,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go new file mode 100644 index 00000000000..91a3c83d4ff --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualNodeInfo struct { + VirtualNodeIndex int32 `json:"VirtualNodeIndex,omitempty"` + + PhysicalNodeNumber int32 `json:"PhysicalNodeNumber,omitempty"` + + VirtualProcessorCount int32 `json:"VirtualProcessorCount,omitempty"` + + MemoryUsageInPages int32 `json:"MemoryUsageInPages,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go new file mode 100644 index 00000000000..f5b7f3e38c0 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualPMemController struct { + Devices map[string]VirtualPMemDevice `json:"Devices,omitempty"` + + MaximumCount uint32 `json:"MaximumCount,omitempty"` + + MaximumSizeBytes uint64 `json:"MaximumSizeBytes,omitempty"` + + Backing string `json:"Backing,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go new file mode 100644 index 00000000000..70cf2d90de0 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualPMemDevice struct { + HostPath string `json:"HostPath,omitempty"` + + ReadOnly bool `json:"ReadOnly,omitempty"` + + ImageFormat string `json:"ImageFormat,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go new file mode 100644 index 00000000000..9ef322f615b --- /dev/null +++ 
b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualPMemMapping struct { + HostPath string `json:"HostPath,omitempty"` + ImageFormat string `json:"ImageFormat,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go new file mode 100644 index 00000000000..f5e05903c54 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.3 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// TODO: This is pre-release support in schema 2.3. Need to add build number +// docs when a public build with this is out. +type VirtualPciDevice struct { + Functions []VirtualPciFunction `json:",omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go new file mode 100644 index 00000000000..cedb7d18bc2 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.3 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// TODO: This is pre-release support in schema 2.3. Need to add build number +// docs when a public build with this is out. 
+type VirtualPciFunction struct { + DeviceInstancePath string `json:",omitempty"` + + VirtualFunction uint16 `json:",omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go new file mode 100644 index 00000000000..362df363e13 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualSmb struct { + Shares []VirtualSmbShare `json:"Shares,omitempty"` + + DirectFileMappingInMB int64 `json:"DirectFileMappingInMB,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go new file mode 100644 index 00000000000..915e9b6386a --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualSmbShare struct { + Name string `json:"Name,omitempty"` + + Path string `json:"Path,omitempty"` + + AllowedFiles []string `json:"AllowedFiles,omitempty"` + + Options *VirtualSmbShareOptions `json:"Options,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go new file mode 100644 index 00000000000..75196bd8c8d --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go @@ -0,0 +1,62 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualSmbShareOptions struct { + ReadOnly bool `json:"ReadOnly,omitempty"` + + // convert exclusive access to shared read access + ShareRead bool `json:"ShareRead,omitempty"` + + // all opens will use cached I/O + CacheIo bool `json:"CacheIo,omitempty"` + + // disable oplock support + NoOplocks bool `json:"NoOplocks,omitempty"` + + // Acquire the backup privilege when attempting to open + TakeBackupPrivilege bool `json:"TakeBackupPrivilege,omitempty"` + + // Use the identity of the share root when opening + UseShareRootIdentity bool `json:"UseShareRootIdentity,omitempty"` + + // disable Direct Mapping + NoDirectmap bool `json:"NoDirectmap,omitempty"` + + // disable Byterange locks + NoLocks bool `json:"NoLocks,omitempty"` + + // disable Directory Change Notifications + NoDirnotify bool `json:"NoDirnotify,omitempty"` + + // share is used for VM shared memory + VmSharedMemory bool `json:"VmSharedMemory,omitempty"` + + // allow access only to the files specified in AllowedFiles + RestrictFileAccess bool `json:"RestrictFileAccess,omitempty"` + + // disable all oplocks except Level II + ForceLevelIIOplocks
bool `json:"ForceLevelIIOplocks,omitempty"` + + // Allow the host to reparse this base layer + ReparseBaseLayer bool `json:"ReparseBaseLayer,omitempty"` + + // Enable pseudo-oplocks + PseudoOplocks bool `json:"PseudoOplocks,omitempty"` + + // All opens will use non-cached IO + NonCacheIo bool `json:"NonCacheIo,omitempty"` + + // Enable pseudo directory change notifications + PseudoDirnotify bool `json:"PseudoDirnotify,omitempty"` + + // Block directory enumeration, renames, and deletes. + SingleFileMapping bool `json:"SingleFileMapping,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go new file mode 100644 index 00000000000..8e1836dd6be --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go @@ -0,0 +1,26 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VmMemory struct { + AvailableMemory int32 `json:"AvailableMemory,omitempty"` + + AvailableMemoryBuffer int32 `json:"AvailableMemoryBuffer,omitempty"` + + ReservedMemory uint64 `json:"ReservedMemory,omitempty"` + + AssignedMemory uint64 `json:"AssignedMemory,omitempty"` + + SlpActive bool `json:"SlpActive,omitempty"` + + BalancingEnabled bool `json:"BalancingEnabled,omitempty"` + + DmOperationInProgress bool `json:"DmOperationInProgress,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go new file mode 100644 index 00000000000..de1b9cf1ae2 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// ProcessorLimits is used when modifying processor scheduling limits of a virtual machine. +type ProcessorLimits struct { + // Maximum amount of host CPU resources that the virtual machine can use. + Limit uint64 `json:"Limit,omitempty"` + // Value describing the relative priority of this virtual machine compared to other virtual machines. + Weight uint64 `json:"Weight,omitempty"` + // Minimum amount of host CPU resources that the virtual machine is guaranteed. + Reservation uint64 `json:"Reservation,omitempty"` + // Provides the target maximum CPU frequency, in MHz, for a virtual machine. 
+ MaximumFrequencyMHz uint32 `json:"MaximumFrequencyMHz,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go new file mode 100644 index 00000000000..8ed7e566d64 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type WindowsCrashReporting struct { + DumpFileName string `json:"DumpFileName,omitempty"` + + MaxDumpSize int64 `json:"MaxDumpSize,omitempty"` +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go new file mode 100644 index 00000000000..a634dfc1515 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go @@ -0,0 +1,49 @@ +package hcs + +import ( + "context" + "encoding/json" + + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/vmcompute" +) + +// GetServiceProperties returns properties of the host compute service. +func GetServiceProperties(ctx context.Context, q hcsschema.PropertyQuery) (*hcsschema.ServiceProperties, error) { + operation := "hcs::GetServiceProperties" + + queryb, err := json.Marshal(q) + if err != nil { + return nil, err + } + propertiesJSON, resultJSON, err := vmcompute.HcsGetServiceProperties(ctx, string(queryb)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, &HcsError{Op: operation, Err: err, Events: events} + } + + if propertiesJSON == "" { + return nil, ErrUnexpectedValue + } + properties := &hcsschema.ServiceProperties{} + if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil { + return nil, err + } + return properties, nil +} + +// ModifyServiceSettings modifies settings of the host compute service. 
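// A minimal usage sketch (not part of the vendored hcsshim sources): querying
// host compute service properties with GetServiceProperties above. The choice
// of hcsschema.PTMemory as the property type is an assumption for
// illustration; callers pick whichever property types they need.
func exampleServiceProperties(ctx context.Context) (*hcsschema.ServiceProperties, error) {
    query := hcsschema.PropertyQuery{
        PropertyTypes: []hcsschema.PropertyType{hcsschema.PTMemory},
    }
    return GetServiceProperties(ctx, query)
}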
+func ModifyServiceSettings(ctx context.Context, settings hcsschema.ModificationRequest) error { + operation := "hcs::ModifyServiceSettings" + + settingsJSON, err := json.Marshal(settings) + if err != nil { + return err + } + resultJSON, err := vmcompute.HcsModifyServiceSettings(ctx, string(settingsJSON)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return &HcsError{Op: operation, Err: err, Events: events} + } + return nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go new file mode 100644 index 00000000000..a76f6b253e2 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go @@ -0,0 +1,815 @@ +package hcs + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strings" + "sync" + "syscall" + "time" + + "github.com/Microsoft/hcsshim/internal/cow" + "github.com/Microsoft/hcsshim/internal/hcs/schema1" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/jobobject" + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/internal/timeout" + "github.com/Microsoft/hcsshim/internal/vmcompute" + "github.com/sirupsen/logrus" + "go.opencensus.io/trace" +) + +type System struct { + handleLock sync.RWMutex + handle vmcompute.HcsSystem + id string + callbackNumber uintptr + + closedWaitOnce sync.Once + waitBlock chan struct{} + waitError error + exitError error + os, typ, owner string + startTime time.Time +} + +func newSystem(id string) *System { + return &System{ + id: id, + waitBlock: make(chan struct{}), + } +} + +// Implementation detail for silo naming, this should NOT be relied upon very heavily. +func siloNameFmt(containerID string) string { + return fmt.Sprintf(`\Container_%s`, containerID) +} + +// CreateComputeSystem creates a new compute system with the given configuration but does not start it. +func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface interface{}) (_ *System, err error) { + operation := "hcs::CreateComputeSystem" + + // hcsCreateComputeSystemContext is an async operation. Start the outer span + // here to measure the full create time. + ctx, span := trace.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", id)) + + computeSystem := newSystem(id) + + hcsDocumentB, err := json.Marshal(hcsDocumentInterface) + if err != nil { + return nil, err + } + + hcsDocument := string(hcsDocumentB) + + var ( + identity syscall.Handle + resultJSON string + createError error + ) + computeSystem.handle, resultJSON, createError = vmcompute.HcsCreateComputeSystem(ctx, id, hcsDocument, identity) + if createError == nil || IsPending(createError) { + defer func() { + if err != nil { + computeSystem.Close() + } + }() + if err = computeSystem.registerCallback(ctx); err != nil { + // Terminate the compute system if it still exists. We're okay to + // ignore a failure here. + _ = computeSystem.Terminate(ctx) + return nil, makeSystemError(computeSystem, operation, err, nil) + } + } + + events, err := processAsyncHcsResult(ctx, createError, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemCreateCompleted, &timeout.SystemCreate) + if err != nil { + if err == ErrTimeout { + // Terminate the compute system if it still exists. 
We're okay to + // ignore a failure here. + _ = computeSystem.Terminate(ctx) + } + return nil, makeSystemError(computeSystem, operation, err, events) + } + go computeSystem.waitBackground() + if err = computeSystem.getCachedProperties(ctx); err != nil { + return nil, err + } + return computeSystem, nil +} + +// OpenComputeSystem opens an existing compute system by ID. +func OpenComputeSystem(ctx context.Context, id string) (*System, error) { + operation := "hcs::OpenComputeSystem" + + computeSystem := newSystem(id) + handle, resultJSON, err := vmcompute.HcsOpenComputeSystem(ctx, id) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, events) + } + computeSystem.handle = handle + defer func() { + if err != nil { + computeSystem.Close() + } + }() + if err = computeSystem.registerCallback(ctx); err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + go computeSystem.waitBackground() + if err = computeSystem.getCachedProperties(ctx); err != nil { + return nil, err + } + return computeSystem, nil +} + +func (computeSystem *System) getCachedProperties(ctx context.Context) error { + props, err := computeSystem.Properties(ctx) + if err != nil { + return err + } + computeSystem.typ = strings.ToLower(props.SystemType) + computeSystem.os = strings.ToLower(props.RuntimeOSType) + computeSystem.owner = strings.ToLower(props.Owner) + if computeSystem.os == "" && computeSystem.typ == "container" { + // Pre-RS5 HCS did not return the OS, but it only supported containers + // that ran Windows. + computeSystem.os = "windows" + } + return nil +} + +// OS returns the operating system of the compute system, "linux" or "windows". +func (computeSystem *System) OS() string { + return computeSystem.os +} + +// IsOCI returns whether processes in the compute system should be created via +// OCI. +func (computeSystem *System) IsOCI() bool { + return computeSystem.os == "linux" && computeSystem.typ == "container" +} + +// GetComputeSystems gets a list of the compute systems on the system that match the query +func GetComputeSystems(ctx context.Context, q schema1.ComputeSystemQuery) ([]schema1.ContainerProperties, error) { + operation := "hcs::GetComputeSystems" + + queryb, err := json.Marshal(q) + if err != nil { + return nil, err + } + + computeSystemsJSON, resultJSON, err := vmcompute.HcsEnumerateComputeSystems(ctx, string(queryb)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, &HcsError{Op: operation, Err: err, Events: events} + } + + if computeSystemsJSON == "" { + return nil, ErrUnexpectedValue + } + computeSystems := []schema1.ContainerProperties{} + if err = json.Unmarshal([]byte(computeSystemsJSON), &computeSystems); err != nil { + return nil, err + } + + return computeSystems, nil +} + +// Start synchronously starts the computeSystem. +func (computeSystem *System) Start(ctx context.Context) (err error) { + operation := "hcs::System::Start" + + // hcsStartComputeSystemContext is an async operation. Start the outer span + // here to measure the full start time. 
+ ctx, span := trace.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + resultJSON, err := vmcompute.HcsStartComputeSystem(ctx, computeSystem.handle, "") + events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart) + if err != nil { + return makeSystemError(computeSystem, operation, err, events) + } + computeSystem.startTime = time.Now() + return nil +} + +// ID returns the compute system's identifier. +func (computeSystem *System) ID() string { + return computeSystem.id +} + +// Shutdown requests a compute system shutdown. +func (computeSystem *System) Shutdown(ctx context.Context) error { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcs::System::Shutdown" + + if computeSystem.handle == 0 { + return nil + } + + resultJSON, err := vmcompute.HcsShutdownComputeSystem(ctx, computeSystem.handle, "") + events := processHcsResult(ctx, resultJSON) + switch err { + case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: + default: + return makeSystemError(computeSystem, operation, err, events) + } + return nil +} + +// Terminate requests a compute system terminate. +func (computeSystem *System) Terminate(ctx context.Context) error { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcs::System::Terminate" + + if computeSystem.handle == 0 { + return nil + } + + resultJSON, err := vmcompute.HcsTerminateComputeSystem(ctx, computeSystem.handle, "") + events := processHcsResult(ctx, resultJSON) + switch err { + case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: + default: + return makeSystemError(computeSystem, operation, err, events) + } + return nil +} + +// waitBackground waits for the compute system exit notification. Once received +// sets `computeSystem.waitError` (if any) and unblocks all `Wait` calls. +// +// This MUST be called exactly once per `computeSystem.handle` but `Wait` is +// safe to call multiple times. +func (computeSystem *System) waitBackground() { + operation := "hcs::System::waitBackground" + ctx, span := trace.StartSpan(context.Background(), operation) + defer span.End() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + + err := waitForNotification(ctx, computeSystem.callbackNumber, hcsNotificationSystemExited, nil) + switch err { + case nil: + log.G(ctx).Debug("system exited") + case ErrVmcomputeUnexpectedExit: + log.G(ctx).Debug("unexpected system exit") + computeSystem.exitError = makeSystemError(computeSystem, operation, err, nil) + err = nil + default: + err = makeSystemError(computeSystem, operation, err, nil) + } + computeSystem.closedWaitOnce.Do(func() { + computeSystem.waitError = err + close(computeSystem.waitBlock) + }) + oc.SetSpanStatus(span, err) +} + +func (computeSystem *System) WaitChannel() <-chan struct{} { + return computeSystem.waitBlock +} + +func (computeSystem *System) WaitError() error { + return computeSystem.waitError +} + +// Wait synchronously waits for the compute system to shutdown or terminate. 
If +// the compute system has already exited returns the previous error (if any). +func (computeSystem *System) Wait() error { + <-computeSystem.WaitChannel() + return computeSystem.WaitError() +} + +// ExitError returns an error describing the reason the compute system terminated. +func (computeSystem *System) ExitError() error { + select { + case <-computeSystem.waitBlock: + if computeSystem.waitError != nil { + return computeSystem.waitError + } + return computeSystem.exitError + default: + return errors.New("container not exited") + } +} + +// Properties returns the requested container properties targeting a V1 schema container. +func (computeSystem *System) Properties(ctx context.Context, types ...schema1.PropertyType) (*schema1.ContainerProperties, error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcs::System::Properties" + + queryBytes, err := json.Marshal(schema1.PropertyQuery{PropertyTypes: types}) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + + propertiesJSON, resultJSON, err := vmcompute.HcsGetComputeSystemProperties(ctx, computeSystem.handle, string(queryBytes)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, events) + } + + if propertiesJSON == "" { + return nil, ErrUnexpectedValue + } + properties := &schema1.ContainerProperties{} + if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + + return properties, nil +} + +// queryInProc handles querying for container properties without reaching out to HCS. `props` +// will be updated to contain any data returned from the queries present in `types`. If any properties +// failed to be queried they will be tallied up and returned in as the first return value. Failures on +// query are NOT considered errors; the only failure case for this method is if the containers job object +// cannot be opened. +func (computeSystem *System) queryInProc(ctx context.Context, props *hcsschema.Properties, types []hcsschema.PropertyType) ([]hcsschema.PropertyType, error) { + // In the future we can make use of some new functionality in the HCS that allows you + // to pass a job object for HCS to use for the container. Currently, the only way we'll + // be able to open the job/silo is if we're running as SYSTEM. + jobOptions := &jobobject.Options{ + UseNTVariant: true, + Name: siloNameFmt(computeSystem.id), + } + job, err := jobobject.Open(ctx, jobOptions) + if err != nil { + return nil, err + } + defer job.Close() + + var fallbackQueryTypes []hcsschema.PropertyType + for _, propType := range types { + switch propType { + case hcsschema.PTStatistics: + // Handle a bad caller asking for the same type twice. No use in re-querying if this is + // filled in already. + if props.Statistics == nil { + props.Statistics, err = computeSystem.statisticsInProc(job) + if err != nil { + log.G(ctx).WithError(err).Warn("failed to get statistics in-proc") + + fallbackQueryTypes = append(fallbackQueryTypes, propType) + } + } + default: + fallbackQueryTypes = append(fallbackQueryTypes, propType) + } + } + + return fallbackQueryTypes, nil +} + +// statisticsInProc emulates what HCS does to grab statistics for a given container with a small +// change to make grabbing the private working set total much more efficient. 
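// A minimal usage sketch (not part of the vendored hcsshim sources): the
// in-proc statistics path below is normally reached by asking a container's
// System for its Statistics property through PropertiesV2 (defined later in
// this file), which falls back to HCS only if the in-proc query fails.
func exampleContainerStatistics(ctx context.Context, system *System) (*hcsschema.Statistics, error) {
    props, err := system.PropertiesV2(ctx, hcsschema.PTStatistics)
    if err != nil {
        return nil, err
    }
    return props.Statistics, nil
}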
+func (computeSystem *System) statisticsInProc(job *jobobject.JobObject) (*hcsschema.Statistics, error) { + // Start timestamp for these stats before we grab them to match HCS + timestamp := time.Now() + + memInfo, err := job.QueryMemoryStats() + if err != nil { + return nil, err + } + + processorInfo, err := job.QueryProcessorStats() + if err != nil { + return nil, err + } + + storageInfo, err := job.QueryStorageStats() + if err != nil { + return nil, err + } + + // This calculates the private working set more efficiently than HCS does. HCS calls NtQuerySystemInformation + // with the class SystemProcessInformation which returns an array containing system information for *every* + // process running on the machine. They then grab the pids that are running in the container and filter down + // the entries in the array to only what's running in that silo and start tallying up the total. This doesn't + // work well as performance should get worse if more processess are running on the machine in general and not + // just in the container. All of the additional information besides the WorkingSetPrivateSize field is ignored + // as well which isn't great and is wasted work to fetch. + // + // HCS only let's you grab statistics in an all or nothing fashion, so we can't just grab the private + // working set ourselves and ask for everything else seperately. The optimization we can make here is + // to open the silo ourselves and do the same queries for the rest of the info, as well as calculating + // the private working set in a more efficient manner by: + // + // 1. Find the pids running in the silo + // 2. Get a process handle for every process (only need PROCESS_QUERY_LIMITED_INFORMATION access) + // 3. Call NtQueryInformationProcess on each process with the class ProcessVmCounters + // 4. Tally up the total using the field PrivateWorkingSetSize in VM_COUNTERS_EX2. + privateWorkingSet, err := job.QueryPrivateWorkingSet() + if err != nil { + return nil, err + } + + return &hcsschema.Statistics{ + Timestamp: timestamp, + ContainerStartTime: computeSystem.startTime, + Uptime100ns: uint64(time.Since(computeSystem.startTime).Nanoseconds()) / 100, + Memory: &hcsschema.MemoryStats{ + MemoryUsageCommitBytes: memInfo.JobMemory, + MemoryUsageCommitPeakBytes: memInfo.PeakJobMemoryUsed, + MemoryUsagePrivateWorkingSetBytes: privateWorkingSet, + }, + Processor: &hcsschema.ProcessorStats{ + RuntimeKernel100ns: uint64(processorInfo.TotalKernelTime), + RuntimeUser100ns: uint64(processorInfo.TotalUserTime), + TotalRuntime100ns: uint64(processorInfo.TotalKernelTime + processorInfo.TotalUserTime), + }, + Storage: &hcsschema.StorageStats{ + ReadCountNormalized: uint64(storageInfo.ReadStats.IoCount), + ReadSizeBytes: storageInfo.ReadStats.TotalSize, + WriteCountNormalized: uint64(storageInfo.WriteStats.IoCount), + WriteSizeBytes: storageInfo.WriteStats.TotalSize, + }, + }, nil +} + +// hcsPropertiesV2Query is a helper to make a HcsGetComputeSystemProperties call using the V2 schema property types. 
+func (computeSystem *System) hcsPropertiesV2Query(ctx context.Context, types []hcsschema.PropertyType) (*hcsschema.Properties, error) { + operation := "hcs::System::PropertiesV2" + + queryBytes, err := json.Marshal(hcsschema.PropertyQuery{PropertyTypes: types}) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + + propertiesJSON, resultJSON, err := vmcompute.HcsGetComputeSystemProperties(ctx, computeSystem.handle, string(queryBytes)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, events) + } + + if propertiesJSON == "" { + return nil, ErrUnexpectedValue + } + props := &hcsschema.Properties{} + if err := json.Unmarshal([]byte(propertiesJSON), props); err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + + return props, nil +} + +// PropertiesV2 returns the requested compute systems properties targeting a V2 schema compute system. +func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (_ *hcsschema.Properties, err error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + // Let HCS tally up the total for VM based queries instead of querying ourselves. + if computeSystem.typ != "container" { + return computeSystem.hcsPropertiesV2Query(ctx, types) + } + + // Define a starter Properties struct with the default fields returned from every + // query. Owner is only returned from Statistics but it's harmless to include. + properties := &hcsschema.Properties{ + Id: computeSystem.id, + SystemType: computeSystem.typ, + RuntimeOsType: computeSystem.os, + Owner: computeSystem.owner, + } + + logEntry := log.G(ctx) + // First lets try and query ourselves without reaching to HCS. If any of the queries fail + // we'll take note and fallback to querying HCS for any of the failed types. + fallbackTypes, err := computeSystem.queryInProc(ctx, properties, types) + if err == nil && len(fallbackTypes) == 0 { + return properties, nil + } else if err != nil { + logEntry.WithError(fmt.Errorf("failed to query compute system properties in-proc: %w", err)) + fallbackTypes = types + } + + logEntry.WithFields(logrus.Fields{ + logfields.ContainerID: computeSystem.id, + "propertyTypes": fallbackTypes, + }).Info("falling back to HCS for property type queries") + + hcsProperties, err := computeSystem.hcsPropertiesV2Query(ctx, fallbackTypes) + if err != nil { + return nil, err + } + + // Now add in anything that we might have successfully queried in process. + if properties.Statistics != nil { + hcsProperties.Statistics = properties.Statistics + hcsProperties.Owner = properties.Owner + } + + // For future support for querying processlist in-proc as well. + if properties.ProcessList != nil { + hcsProperties.ProcessList = properties.ProcessList + } + + return hcsProperties, nil +} + +// Pause pauses the execution of the computeSystem. This feature is not enabled in TP5. +func (computeSystem *System) Pause(ctx context.Context) (err error) { + operation := "hcs::System::Pause" + + // hcsPauseComputeSystemContext is an async peration. Start the outer span + // here to measure the full pause time. 
+ ctx, span := trace.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + resultJSON, err := vmcompute.HcsPauseComputeSystem(ctx, computeSystem.handle, "") + events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemPauseCompleted, &timeout.SystemPause) + if err != nil { + return makeSystemError(computeSystem, operation, err, events) + } + + return nil +} + +// Resume resumes the execution of the computeSystem. This feature is not enabled in TP5. +func (computeSystem *System) Resume(ctx context.Context) (err error) { + operation := "hcs::System::Resume" + + // hcsResumeComputeSystemContext is an async operation. Start the outer span + // here to measure the full restore time. + ctx, span := trace.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + resultJSON, err := vmcompute.HcsResumeComputeSystem(ctx, computeSystem.handle, "") + events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemResumeCompleted, &timeout.SystemResume) + if err != nil { + return makeSystemError(computeSystem, operation, err, events) + } + + return nil +} + +// Save the compute system +func (computeSystem *System) Save(ctx context.Context, options interface{}) (err error) { + operation := "hcs::System::Save" + + // hcsSaveComputeSystemContext is an async peration. Start the outer span + // here to measure the full save time. 
+ ctx, span := trace.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + + saveOptions, err := json.Marshal(options) + if err != nil { + return err + } + + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + result, err := vmcompute.HcsSaveComputeSystem(ctx, computeSystem.handle, string(saveOptions)) + events, err := processAsyncHcsResult(ctx, err, result, computeSystem.callbackNumber, hcsNotificationSystemSaveCompleted, &timeout.SystemSave) + if err != nil { + return makeSystemError(computeSystem, operation, err, events) + } + + return nil +} + +func (computeSystem *System) createProcess(ctx context.Context, operation string, c interface{}) (*Process, *vmcompute.HcsProcessInformation, error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + if computeSystem.handle == 0 { + return nil, nil, makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + configurationb, err := json.Marshal(c) + if err != nil { + return nil, nil, makeSystemError(computeSystem, operation, err, nil) + } + + configuration := string(configurationb) + processInfo, processHandle, resultJSON, err := vmcompute.HcsCreateProcess(ctx, computeSystem.handle, configuration) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, nil, makeSystemError(computeSystem, operation, err, events) + } + + log.G(ctx).WithField("pid", processInfo.ProcessId).Debug("created process pid") + return newProcess(processHandle, int(processInfo.ProcessId), computeSystem), &processInfo, nil +} + +// CreateProcess launches a new process within the computeSystem. +func (computeSystem *System) CreateProcess(ctx context.Context, c interface{}) (cow.Process, error) { + operation := "hcs::System::CreateProcess" + process, processInfo, err := computeSystem.createProcess(ctx, operation, c) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + process.Close() + } + }() + + pipes, err := makeOpenFiles([]syscall.Handle{processInfo.StdInput, processInfo.StdOutput, processInfo.StdError}) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + process.stdin = pipes[0] + process.stdout = pipes[1] + process.stderr = pipes[2] + process.hasCachedStdio = true + + if err = process.registerCallback(ctx); err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + go process.waitBackground() + + return process, nil +} + +// OpenProcess gets an interface to an existing process within the computeSystem. 
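// A minimal usage sketch (not part of the vendored hcsshim sources): launching
// a process inside a compute system with CreateProcess above and waiting for
// it to exit. The hcsschema.ProcessParameters fields shown are assumptions
// picked for the example; real callers derive them from their runtime spec.
func exampleRunProcess(ctx context.Context, system *System) error {
    params := &hcsschema.ProcessParameters{
        CommandLine:      `cmd /c echo hello`,
        CreateStdOutPipe: true,
    }
    proc, err := system.CreateProcess(ctx, params)
    if err != nil {
        return err
    }
    defer proc.Close()
    return proc.Wait()
}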
+func (computeSystem *System) OpenProcess(ctx context.Context, pid int) (*Process, error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcs::System::OpenProcess" + + if computeSystem.handle == 0 { + return nil, makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + processHandle, resultJSON, err := vmcompute.HcsOpenProcess(ctx, computeSystem.handle, uint32(pid)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, events) + } + + process := newProcess(processHandle, pid, computeSystem) + if err = process.registerCallback(ctx); err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + go process.waitBackground() + + return process, nil +} + +// Close cleans up any state associated with the compute system but does not terminate or wait for it. +func (computeSystem *System) Close() (err error) { + operation := "hcs::System::Close" + ctx, span := trace.StartSpan(context.Background(), operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + + computeSystem.handleLock.Lock() + defer computeSystem.handleLock.Unlock() + + // Don't double free this + if computeSystem.handle == 0 { + return nil + } + + if err = computeSystem.unregisterCallback(ctx); err != nil { + return makeSystemError(computeSystem, operation, err, nil) + } + + err = vmcompute.HcsCloseComputeSystem(ctx, computeSystem.handle) + if err != nil { + return makeSystemError(computeSystem, operation, err, nil) + } + + computeSystem.handle = 0 + computeSystem.closedWaitOnce.Do(func() { + computeSystem.waitError = ErrAlreadyClosed + close(computeSystem.waitBlock) + }) + + return nil +} + +func (computeSystem *System) registerCallback(ctx context.Context) error { + callbackContext := ¬ificationWatcherContext{ + channels: newSystemChannels(), + systemID: computeSystem.id, + } + + callbackMapLock.Lock() + callbackNumber := nextCallback + nextCallback++ + callbackMap[callbackNumber] = callbackContext + callbackMapLock.Unlock() + + callbackHandle, err := vmcompute.HcsRegisterComputeSystemCallback(ctx, computeSystem.handle, notificationWatcherCallback, callbackNumber) + if err != nil { + return err + } + callbackContext.handle = callbackHandle + computeSystem.callbackNumber = callbackNumber + + return nil +} + +func (computeSystem *System) unregisterCallback(ctx context.Context) error { + callbackNumber := computeSystem.callbackNumber + + callbackMapLock.RLock() + callbackContext := callbackMap[callbackNumber] + callbackMapLock.RUnlock() + + if callbackContext == nil { + return nil + } + + handle := callbackContext.handle + + if handle == 0 { + return nil + } + + // hcsUnregisterComputeSystemCallback has its own syncronization + // to wait for all callbacks to complete. We must NOT hold the callbackMapLock. 
+ err := vmcompute.HcsUnregisterComputeSystemCallback(ctx, handle) + if err != nil { + return err + } + + closeChannels(callbackContext.channels) + + callbackMapLock.Lock() + delete(callbackMap, callbackNumber) + callbackMapLock.Unlock() + + handle = 0 //nolint:ineffassign + + return nil +} + +// Modify the System by sending a request to HCS +func (computeSystem *System) Modify(ctx context.Context, config interface{}) error { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcs::System::Modify" + + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + requestBytes, err := json.Marshal(config) + if err != nil { + return err + } + + requestJSON := string(requestBytes) + resultJSON, err := vmcompute.HcsModifyComputeSystem(ctx, computeSystem.handle, requestJSON) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return makeSystemError(computeSystem, operation, err, events) + } + + return nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go new file mode 100644 index 00000000000..3342e5bb948 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go @@ -0,0 +1,62 @@ +package hcs + +import ( + "context" + "io" + "syscall" + + "github.com/Microsoft/go-winio" + diskutil "github.com/Microsoft/go-winio/vhd" + "github.com/Microsoft/hcsshim/computestorage" + "github.com/pkg/errors" + "golang.org/x/sys/windows" +) + +// makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles +// if there is an error. +func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) { + fs := make([]io.ReadWriteCloser, len(hs)) + for i, h := range hs { + if h != syscall.Handle(0) { + if err == nil { + fs[i], err = winio.MakeOpenFile(h) + } + if err != nil { + syscall.Close(h) + } + } + } + if err != nil { + for _, f := range fs { + if f != nil { + f.Close() + } + } + return nil, err + } + return fs, nil +} + +// CreateNTFSVHD creates a VHD formatted with NTFS of size `sizeGB` at the given `vhdPath`. 
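// A minimal usage sketch (not part of the vendored hcsshim sources): creating
// a 1 GB NTFS-formatted scratch disk with CreateNTFSVHD below. The path is an
// arbitrary example value.
func exampleScratchVHD(ctx context.Context) error {
    return CreateNTFSVHD(ctx, `C:\scratch\scratch.vhdx`, 1)
}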
+func CreateNTFSVHD(ctx context.Context, vhdPath string, sizeGB uint32) (err error) { + if err := diskutil.CreateVhdx(vhdPath, sizeGB, 1); err != nil { + return errors.Wrap(err, "failed to create VHD") + } + + vhd, err := diskutil.OpenVirtualDisk(vhdPath, diskutil.VirtualDiskAccessNone, diskutil.OpenVirtualDiskFlagNone) + if err != nil { + return errors.Wrap(err, "failed to open VHD") + } + defer func() { + err2 := windows.CloseHandle(windows.Handle(vhd)) + if err == nil { + err = errors.Wrap(err2, "failed to close VHD") + } + }() + + if err := computestorage.FormatWritableLayerVhd(ctx, windows.Handle(vhd)); err != nil { + return errors.Wrap(err, "failed to format VHD") + } + + return nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go new file mode 100644 index 00000000000..db4e14fdfb2 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go @@ -0,0 +1,68 @@ +package hcs + +import ( + "context" + "time" + + "github.com/Microsoft/hcsshim/internal/log" +) + +func processAsyncHcsResult(ctx context.Context, err error, resultJSON string, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) ([]ErrorEvent, error) { + events := processHcsResult(ctx, resultJSON) + if IsPending(err) { + return nil, waitForNotification(ctx, callbackNumber, expectedNotification, timeout) + } + + return events, err +} + +func waitForNotification(ctx context.Context, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error { + callbackMapLock.RLock() + if _, ok := callbackMap[callbackNumber]; !ok { + callbackMapLock.RUnlock() + log.G(ctx).WithField("callbackNumber", callbackNumber).Error("failed to waitForNotification: callbackNumber does not exist in callbackMap") + return ErrHandleClose + } + channels := callbackMap[callbackNumber].channels + callbackMapLock.RUnlock() + + expectedChannel := channels[expectedNotification] + if expectedChannel == nil { + log.G(ctx).WithField("type", expectedNotification).Error("unknown notification type in waitForNotification") + return ErrInvalidNotificationType + } + + var c <-chan time.Time + if timeout != nil { + timer := time.NewTimer(*timeout) + c = timer.C + defer timer.Stop() + } + + select { + case err, ok := <-expectedChannel: + if !ok { + return ErrHandleClose + } + return err + case err, ok := <-channels[hcsNotificationSystemExited]: + if !ok { + return ErrHandleClose + } + // If the expected notification is hcsNotificationSystemExited which of the two selects + // chosen is random. 
Return the raw error if hcsNotificationSystemExited is expected + if channels[hcsNotificationSystemExited] == expectedChannel { + return err + } + return ErrUnexpectedContainerExit + case _, ok := <-channels[hcsNotificationServiceDisconnect]: + if !ok { + return ErrHandleClose + } + // hcsNotificationServiceDisconnect should never be an expected notification + // it does not need the same handling as hcsNotificationSystemExited + return ErrUnexpectedProcessAbort + case <-c: + return ErrTimeout + } +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go new file mode 100644 index 00000000000..921c2c8556c --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go @@ -0,0 +1,47 @@ +package hcserror + +import ( + "fmt" + "syscall" +) + +const ERROR_GEN_FAILURE = syscall.Errno(31) + +type HcsError struct { + title string + rest string + Err error +} + +func (e *HcsError) Error() string { + s := e.title + if len(s) > 0 && s[len(s)-1] != ' ' { + s += " " + } + s += fmt.Sprintf("failed in Win32: %s (0x%x)", e.Err, Win32FromError(e.Err)) + if e.rest != "" { + if e.rest[0] != ' ' { + s += " " + } + s += e.rest + } + return s +} + +func New(err error, title, rest string) error { + // Pass through DLL errors directly since they do not originate from HCS. + if _, ok := err.(*syscall.DLLError); ok { + return err + } + return &HcsError{title, rest, err} +} + +func Win32FromError(err error) uint32 { + if herr, ok := err.(*HcsError); ok { + return Win32FromError(herr.Err) + } + if code, ok := err.(syscall.Errno); ok { + return uint32(code) + } + return uint32(ERROR_GEN_FAILURE) +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go new file mode 100644 index 00000000000..922f7c679e0 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go @@ -0,0 +1,23 @@ +package interop + +import ( + "syscall" + "unsafe" +) + +//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go interop.go + +//sys coTaskMemFree(buffer unsafe.Pointer) = api_ms_win_core_com_l1_1_0.CoTaskMemFree + +func ConvertAndFreeCoTaskMemString(buffer *uint16) string { + str := syscall.UTF16ToString((*[1 << 29]uint16)(unsafe.Pointer(buffer))[:]) + coTaskMemFree(unsafe.Pointer(buffer)) + return str +} + +func Win32FromHresult(hr uintptr) syscall.Errno { + if hr&0x1fff0000 == 0x00070000 { + return syscall.Errno(hr & 0xffff) + } + return syscall.Errno(hr) +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go new file mode 100644 index 00000000000..12b0c71c5ae --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go @@ -0,0 +1,48 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package interop + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. 
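// A minimal usage sketch (not part of the vendored hcsshim sources):
// Win32FromHresult (defined in interop.go above) unwraps FACILITY_WIN32
// HRESULTs, so 0x80070005 (E_ACCESSDENIED) maps back to Errno 5
// (ERROR_ACCESS_DENIED).
func exampleUnwrapHresult() syscall.Errno {
    return Win32FromHresult(0x80070005) // syscall.Errno(5)
}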
+func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modapi_ms_win_core_com_l1_1_0 = windows.NewLazySystemDLL("api-ms-win-core-com-l1-1-0.dll") + + procCoTaskMemFree = modapi_ms_win_core_com_l1_1_0.NewProc("CoTaskMemFree") +) + +func coTaskMemFree(buffer unsafe.Pointer) { + syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(buffer), 0, 0) + return +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go new file mode 100644 index 00000000000..5d6acd69e61 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go @@ -0,0 +1,111 @@ +package jobobject + +import ( + "context" + "fmt" + "sync" + "unsafe" + + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/queue" + "github.com/Microsoft/hcsshim/internal/winapi" + "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" +) + +var ( + ioInitOnce sync.Once + initIOErr error + // Global iocp handle that will be re-used for every job object + ioCompletionPort windows.Handle + // Mapping of job handle to queue to place notifications in. + jobMap sync.Map +) + +// MsgAllProcessesExited is a type representing a message that every process in a job has exited. +type MsgAllProcessesExited struct{} + +// MsgUnimplemented represents a message that we are aware of, but that isn't implemented currently. +// This should not be treated as an error. +type MsgUnimplemented struct{} + +// pollIOCP polls the io completion port forever. +func pollIOCP(ctx context.Context, iocpHandle windows.Handle) { + var ( + overlapped uintptr + code uint32 + key uintptr + ) + + for { + err := windows.GetQueuedCompletionStatus(iocpHandle, &code, &key, (**windows.Overlapped)(unsafe.Pointer(&overlapped)), windows.INFINITE) + if err != nil { + log.G(ctx).WithError(err).Error("failed to poll for job object message") + continue + } + if val, ok := jobMap.Load(key); ok { + msq, ok := val.(*queue.MessageQueue) + if !ok { + log.G(ctx).WithField("value", msq).Warn("encountered non queue type in job map") + continue + } + notification, err := parseMessage(code, overlapped) + if err != nil { + log.G(ctx).WithFields(logrus.Fields{ + "code": code, + "overlapped": overlapped, + }).Warn("failed to parse job object message") + continue + } + if err := msq.Enqueue(notification); err == queue.ErrQueueClosed { + // Write will only return an error when the queue is closed. + // The only time a queue would ever be closed is when we call `Close` on + // the job it belongs to which also removes it from the jobMap, so something + // went wrong here. We can't return as this is reading messages for all jobs + // so just log it and move on. + log.G(ctx).WithFields(logrus.Fields{ + "code": code, + "overlapped": overlapped, + }).Warn("tried to write to a closed queue") + continue + } + } else { + log.G(ctx).Warn("received a message for a job not present in the mapping") + } + } +} + +func parseMessage(code uint32, overlapped uintptr) (interface{}, error) { + // Check code and parse out relevant information related to that notification + // that we care about. For now all we handle is the message that all processes + // in the job have exited. 
+ switch code { + case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO: + return MsgAllProcessesExited{}, nil + // Other messages for completeness and a check to make sure that if we fall + // into the default case that this is a code we don't know how to handle. + case winapi.JOB_OBJECT_MSG_END_OF_JOB_TIME: + case winapi.JOB_OBJECT_MSG_END_OF_PROCESS_TIME: + case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT: + case winapi.JOB_OBJECT_MSG_NEW_PROCESS: + case winapi.JOB_OBJECT_MSG_EXIT_PROCESS: + case winapi.JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS: + case winapi.JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT: + case winapi.JOB_OBJECT_MSG_JOB_MEMORY_LIMIT: + case winapi.JOB_OBJECT_MSG_NOTIFICATION_LIMIT: + default: + return nil, fmt.Errorf("unknown job notification type: %d", code) + } + return MsgUnimplemented{}, nil +} + +// Assigns an IO completion port to get notified of events for the registered job +// object. +func attachIOCP(job windows.Handle, iocp windows.Handle) error { + info := winapi.JOBOBJECT_ASSOCIATE_COMPLETION_PORT{ + CompletionKey: job, + CompletionPort: iocp, + } + _, err := windows.SetInformationJobObject(job, windows.JobObjectAssociateCompletionPortInformation, uintptr(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info))) + return err +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go new file mode 100644 index 00000000000..c9fdd921a7f --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go @@ -0,0 +1,538 @@ +package jobobject + +import ( + "context" + "errors" + "fmt" + "sync" + "unsafe" + + "github.com/Microsoft/hcsshim/internal/queue" + "github.com/Microsoft/hcsshim/internal/winapi" + "golang.org/x/sys/windows" +) + +// This file provides higher level constructs for the win32 job object API. +// Most of the core creation and management functions are already present in "golang.org/x/sys/windows" +// (CreateJobObject, AssignProcessToJobObject, etc.) as well as most of the limit information +// structs and associated limit flags. Whatever is not present from the job object API +// in golang.org/x/sys/windows is located in /internal/winapi. +// +// https://docs.microsoft.com/en-us/windows/win32/procthread/job-objects + +// JobObject is a high level wrapper around a Windows job object. Holds a handle to +// the job, a queue to receive iocp notifications about the lifecycle +// of the job and a mutex for synchronized handle access. +type JobObject struct { + handle windows.Handle + mq *queue.MessageQueue + handleLock sync.RWMutex +} + +// JobLimits represents the resource constraints that can be applied to a job object. +type JobLimits struct { + CPULimit uint32 + CPUWeight uint32 + MemoryLimitInBytes uint64 + MaxIOPS int64 + MaxBandwidth int64 +} + +type CPURateControlType uint32 + +const ( + WeightBased CPURateControlType = iota + RateBased +) + +// Processor resource controls +const ( + cpuLimitMin = 1 + cpuLimitMax = 10000 + cpuWeightMin = 1 + cpuWeightMax = 9 +) + +var ( + ErrAlreadyClosed = errors.New("the handle has already been closed") + ErrNotRegistered = errors.New("job is not registered to receive notifications") +) + +// Options represents the set of configurable options when making or opening a job object. +type Options struct { + // `Name` specifies the name of the job object if a named job object is desired. + Name string + // `Notifications` specifies if the job will be registered to receive notifications. 
+ // Defaults to false. + Notifications bool + // `UseNTVariant` specifies if we should use the `Nt` variant of Open/CreateJobObject. + // Defaults to false. + UseNTVariant bool + // `IOTracking` enables tracking I/O statistics on the job object. More specifically this + // calls SetInformationJobObject with the JobObjectIoAttribution class. + EnableIOTracking bool +} + +// Create creates a job object. +// +// If options.Name is an empty string, the job will not be assigned a name. +// +// If options.Notifications are not enabled `PollNotifications` will return immediately with error `errNotRegistered`. +// +// If `options` is nil, use default option values. +// +// Returns a JobObject structure and an error if there is one. +func Create(ctx context.Context, options *Options) (_ *JobObject, err error) { + if options == nil { + options = &Options{} + } + + var jobName *winapi.UnicodeString + if options.Name != "" { + jobName, err = winapi.NewUnicodeString(options.Name) + if err != nil { + return nil, err + } + } + + var jobHandle windows.Handle + if options.UseNTVariant { + oa := winapi.ObjectAttributes{ + Length: unsafe.Sizeof(winapi.ObjectAttributes{}), + ObjectName: jobName, + Attributes: 0, + } + status := winapi.NtCreateJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa) + if status != 0 { + return nil, winapi.RtlNtStatusToDosError(status) + } + } else { + var jobNameBuf *uint16 + if jobName != nil && jobName.Buffer != nil { + jobNameBuf = jobName.Buffer + } + jobHandle, err = windows.CreateJobObject(nil, jobNameBuf) + if err != nil { + return nil, err + } + } + + defer func() { + if err != nil { + windows.Close(jobHandle) + } + }() + + job := &JobObject{ + handle: jobHandle, + } + + // If the IOCP we'll be using to receive messages for all jobs hasn't been + // created, create it and start polling. + if options.Notifications { + mq, err := setupNotifications(ctx, job) + if err != nil { + return nil, err + } + job.mq = mq + } + + if options.EnableIOTracking { + if err := enableIOTracking(jobHandle); err != nil { + return nil, err + } + } + + return job, nil +} + +// Open opens an existing job object with name provided in `options`. If no name is provided +// return an error since we need to know what job object to open. +// +// If options.Notifications is false `PollNotifications` will return immediately with error `errNotRegistered`. +// +// Returns a JobObject structure and an error if there is one. +func Open(ctx context.Context, options *Options) (_ *JobObject, err error) { + if options == nil || (options != nil && options.Name == "") { + return nil, errors.New("no job object name specified to open") + } + + unicodeJobName, err := winapi.NewUnicodeString(options.Name) + if err != nil { + return nil, err + } + + var jobHandle windows.Handle + if options != nil && options.UseNTVariant { + oa := winapi.ObjectAttributes{ + Length: unsafe.Sizeof(winapi.ObjectAttributes{}), + ObjectName: unicodeJobName, + Attributes: 0, + } + status := winapi.NtOpenJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa) + if status != 0 { + return nil, winapi.RtlNtStatusToDosError(status) + } + } else { + jobHandle, err = winapi.OpenJobObject(winapi.JOB_OBJECT_ALL_ACCESS, false, unicodeJobName.Buffer) + if err != nil { + return nil, err + } + } + + defer func() { + if err != nil { + windows.Close(jobHandle) + } + }() + + job := &JobObject{ + handle: jobHandle, + } + + // If the IOCP we'll be using to receive messages for all jobs hasn't been + // created, create it and start polling. 
+ if options != nil && options.Notifications { + mq, err := setupNotifications(ctx, job) + if err != nil { + return nil, err + } + job.mq = mq + } + + return job, nil +} + +// helper function to setup notifications for creating/opening a job object +func setupNotifications(ctx context.Context, job *JobObject) (*queue.MessageQueue, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + ioInitOnce.Do(func() { + h, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff) + if err != nil { + initIOErr = err + return + } + ioCompletionPort = h + go pollIOCP(ctx, h) + }) + + if initIOErr != nil { + return nil, initIOErr + } + + mq := queue.NewMessageQueue() + jobMap.Store(uintptr(job.handle), mq) + if err := attachIOCP(job.handle, ioCompletionPort); err != nil { + jobMap.Delete(uintptr(job.handle)) + return nil, fmt.Errorf("failed to attach job to IO completion port: %w", err) + } + return mq, nil +} + +// PollNotification will poll for a job object notification. This call should only be called once +// per job (ideally in a goroutine loop) and will block if there is not a notification ready. +// This call will return immediately with error `ErrNotRegistered` if the job was not registered +// to receive notifications during `Create`. Internally, messages will be queued and there +// is no worry of messages being dropped. +func (job *JobObject) PollNotification() (interface{}, error) { + if job.mq == nil { + return nil, ErrNotRegistered + } + return job.mq.Dequeue() +} + +// UpdateProcThreadAttribute updates the passed in ProcThreadAttributeList to contain what is necessary to +// launch a process in a job at creation time. This can be used to avoid having to call Assign() after a process +// has already started running. +func (job *JobObject) UpdateProcThreadAttribute(attrList *windows.ProcThreadAttributeListContainer) error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + + if err := attrList.Update( + winapi.PROC_THREAD_ATTRIBUTE_JOB_LIST, + unsafe.Pointer(&job.handle), + unsafe.Sizeof(job.handle), + ); err != nil { + return fmt.Errorf("failed to update proc thread attributes for job object: %w", err) + } + + return nil +} + +// Close closes the job object handle. +func (job *JobObject) Close() error { + job.handleLock.Lock() + defer job.handleLock.Unlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + + if err := windows.Close(job.handle); err != nil { + return err + } + + if job.mq != nil { + job.mq.Close() + } + // Handles now invalid so if the map entry to receive notifications for this job still + // exists remove it so we can stop receiving notifications. + if _, ok := jobMap.Load(uintptr(job.handle)); ok { + jobMap.Delete(uintptr(job.handle)) + } + + job.handle = 0 + return nil +} + +// Assign assigns a process to the job object. +func (job *JobObject) Assign(pid uint32) error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + + if pid == 0 { + return errors.New("invalid pid: 0") + } + hProc, err := windows.OpenProcess(winapi.PROCESS_ALL_ACCESS, true, pid) + if err != nil { + return err + } + defer windows.Close(hProc) + return windows.AssignProcessToJobObject(job.handle, hProc) +} + +// Terminate terminates the job, essentially calls TerminateProcess on every process in the +// job. 
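// A minimal usage sketch (not part of the vendored hcsshim sources): creating
// a job with notifications enabled, assigning an existing process to it, and
// polling until every process in the job has exited. The pid parameter is a
// placeholder supplied by the caller.
func exampleWaitForJobExit(ctx context.Context, pid uint32) error {
    job, err := Create(ctx, &Options{Notifications: true})
    if err != nil {
        return err
    }
    defer job.Close()

    if err := job.Assign(pid); err != nil {
        return err
    }

    for {
        msg, err := job.PollNotification()
        if err != nil {
            return err
        }
        if _, ok := msg.(MsgAllProcessesExited); ok {
            return nil
        }
    }
}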
+func (job *JobObject) Terminate(exitCode uint32) error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + if job.handle == 0 { + return ErrAlreadyClosed + } + return windows.TerminateJobObject(job.handle, exitCode) +} + +// Pids returns all of the process IDs in the job object. +func (job *JobObject) Pids() ([]uint32, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + info := winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST{} + err := winapi.QueryInformationJobObject( + job.handle, + winapi.JobObjectBasicProcessIdList, + unsafe.Pointer(&info), + uint32(unsafe.Sizeof(info)), + nil, + ) + + // This is either the case where there is only one process or no processes in + // the job. Any other case will result in ERROR_MORE_DATA. Check if info.NumberOfProcessIdsInList + // is 1 and just return this, otherwise return an empty slice. + if err == nil { + if info.NumberOfProcessIdsInList == 1 { + return []uint32{uint32(info.ProcessIdList[0])}, nil + } + // Return empty slice instead of nil to play well with the caller of this. + // Do not return an error if no processes are running inside the job + return []uint32{}, nil + } + + if err != winapi.ERROR_MORE_DATA { + return nil, fmt.Errorf("failed initial query for PIDs in job object: %w", err) + } + + jobBasicProcessIDListSize := unsafe.Sizeof(info) + (unsafe.Sizeof(info.ProcessIdList[0]) * uintptr(info.NumberOfAssignedProcesses-1)) + buf := make([]byte, jobBasicProcessIDListSize) + if err = winapi.QueryInformationJobObject( + job.handle, + winapi.JobObjectBasicProcessIdList, + unsafe.Pointer(&buf[0]), + uint32(len(buf)), + nil, + ); err != nil { + return nil, fmt.Errorf("failed to query for PIDs in job object: %w", err) + } + + bufInfo := (*winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST)(unsafe.Pointer(&buf[0])) + pids := make([]uint32, bufInfo.NumberOfProcessIdsInList) + for i, bufPid := range bufInfo.AllPids() { + pids[i] = uint32(bufPid) + } + return pids, nil +} + +// QueryMemoryStats gets the memory stats for the job object. +func (job *JobObject) QueryMemoryStats() (*winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + info := winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION{} + if err := winapi.QueryInformationJobObject( + job.handle, + winapi.JobObjectMemoryUsageInformation, + unsafe.Pointer(&info), + uint32(unsafe.Sizeof(info)), + nil, + ); err != nil { + return nil, fmt.Errorf("failed to query for job object memory stats: %w", err) + } + return &info, nil +} + +// QueryProcessorStats gets the processor stats for the job object. +func (job *JobObject) QueryProcessorStats() (*winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + info := winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION{} + if err := winapi.QueryInformationJobObject( + job.handle, + winapi.JobObjectBasicAccountingInformation, + unsafe.Pointer(&info), + uint32(unsafe.Sizeof(info)), + nil, + ); err != nil { + return nil, fmt.Errorf("failed to query for job object process stats: %w", err) + } + return &info, nil +} + +// QueryStorageStats gets the storage (I/O) stats for the job object. This call will error +// if either `EnableIOTracking` wasn't set to true on creation of the job, or SetIOTracking() +// hasn't been called since creation of the job. 
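// A minimal usage sketch (not part of the vendored hcsshim sources): storage
// stats are only available when I/O tracking is on, either via
// Options.EnableIOTracking at creation time or SetIOTracking afterwards, and
// are then read back with QueryStorageStats below.
func exampleStorageStats(ctx context.Context) (*winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION, error) {
    job, err := Create(ctx, &Options{EnableIOTracking: true})
    if err != nil {
        return nil, err
    }
    defer job.Close()
    return job.QueryStorageStats()
}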
+func (job *JobObject) QueryStorageStats() (*winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{ + ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE, + } + if err := winapi.QueryInformationJobObject( + job.handle, + winapi.JobObjectIoAttribution, + unsafe.Pointer(&info), + uint32(unsafe.Sizeof(info)), + nil, + ); err != nil { + return nil, fmt.Errorf("failed to query for job object storage stats: %w", err) + } + return &info, nil +} + +// QueryPrivateWorkingSet returns the private working set size for the job. This is calculated by adding up the +// private working set for every process running in the job. +func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) { + pids, err := job.Pids() + if err != nil { + return 0, err + } + + openAndQueryWorkingSet := func(pid uint32) (uint64, error) { + h, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, pid) + if err != nil { + // Continue to the next if OpenProcess doesn't return a valid handle (fails). Handles a + // case where one of the pids in the job exited before we open. + return 0, nil + } + defer func() { + _ = windows.Close(h) + }() + // Check if the process is actually running in the job still. There's a small chance + // that the process could have exited and had its pid re-used between grabbing the pids + // in the job and opening the handle to it above. + var inJob int32 + if err := winapi.IsProcessInJob(h, job.handle, &inJob); err != nil { + // This shouldn't fail unless we have incorrect access rights which we control + // here so probably best to error out if this failed. + return 0, err + } + // Don't report stats for this process as it's not running in the job. This shouldn't be + // an error condition though. + if inJob == 0 { + return 0, nil + } + + var vmCounters winapi.VM_COUNTERS_EX2 + status := winapi.NtQueryInformationProcess( + h, + winapi.ProcessVmCounters, + unsafe.Pointer(&vmCounters), + uint32(unsafe.Sizeof(vmCounters)), + nil, + ) + if !winapi.NTSuccess(status) { + return 0, fmt.Errorf("failed to query information for process: %w", winapi.RtlNtStatusToDosError(status)) + } + return uint64(vmCounters.PrivateWorkingSetSize), nil + } + + var jobWorkingSetSize uint64 + for _, pid := range pids { + workingSet, err := openAndQueryWorkingSet(pid) + if err != nil { + return 0, err + } + jobWorkingSetSize += workingSet + } + + return jobWorkingSetSize, nil +} + +// SetIOTracking enables IO tracking for processes in the job object. +// This enables use of the QueryStorageStats method. 
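// A minimal usage sketch (not part of the vendored hcsshim sources): combining
// the job-level commit figure from QueryMemoryStats with the per-process
// private working set total from QueryPrivateWorkingSet above, much like
// statisticsInProc in the hcs package does.
func exampleMemorySnapshot(job *JobObject) (commitBytes, privateWorkingSetBytes uint64, err error) {
    memInfo, err := job.QueryMemoryStats()
    if err != nil {
        return 0, 0, err
    }
    privateWorkingSet, err := job.QueryPrivateWorkingSet()
    if err != nil {
        return 0, 0, err
    }
    return memInfo.JobMemory, privateWorkingSet, nil
}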
+func (job *JobObject) SetIOTracking() error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + + return enableIOTracking(job.handle) +} + +func enableIOTracking(job windows.Handle) error { + info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{ + ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE, + } + if _, err := windows.SetInformationJobObject( + job, + winapi.JobObjectIoAttribution, + uintptr(unsafe.Pointer(&info)), + uint32(unsafe.Sizeof(info)), + ); err != nil { + return fmt.Errorf("failed to enable IO tracking on job object: %w", err) + } + return nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go new file mode 100644 index 00000000000..4efde292c49 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go @@ -0,0 +1,315 @@ +package jobobject + +import ( + "errors" + "fmt" + "unsafe" + + "github.com/Microsoft/hcsshim/internal/winapi" + "golang.org/x/sys/windows" +) + +const ( + memoryLimitMax uint64 = 0xffffffffffffffff +) + +func isFlagSet(flag, controlFlags uint32) bool { + return (flag & controlFlags) == flag +} + +// SetResourceLimits sets resource limits on the job object (cpu, memory, storage). +func (job *JobObject) SetResourceLimits(limits *JobLimits) error { + // Go through and check what limits were specified and apply them to the job. + if limits.MemoryLimitInBytes != 0 { + if err := job.SetMemoryLimit(limits.MemoryLimitInBytes); err != nil { + return fmt.Errorf("failed to set job object memory limit: %w", err) + } + } + + if limits.CPULimit != 0 { + if err := job.SetCPULimit(RateBased, limits.CPULimit); err != nil { + return fmt.Errorf("failed to set job object cpu limit: %w", err) + } + } else if limits.CPUWeight != 0 { + if err := job.SetCPULimit(WeightBased, limits.CPUWeight); err != nil { + return fmt.Errorf("failed to set job object cpu limit: %w", err) + } + } + + if limits.MaxBandwidth != 0 || limits.MaxIOPS != 0 { + if err := job.SetIOLimit(limits.MaxBandwidth, limits.MaxIOPS); err != nil { + return fmt.Errorf("failed to set io limit on job object: %w", err) + } + } + return nil +} + +// SetTerminateOnLastHandleClose sets the job object flag that specifies that the job should terminate +// all processes in the job on the last open handle being closed. +func (job *JobObject) SetTerminateOnLastHandleClose() error { + info, err := job.getExtendedInformation() + if err != nil { + return err + } + info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE + return job.setExtendedInformation(info) +} + +// SetMemoryLimit sets the memory limit of the job object based on the given `memoryLimitInBytes`. +func (job *JobObject) SetMemoryLimit(memoryLimitInBytes uint64) error { + if memoryLimitInBytes >= memoryLimitMax { + return errors.New("memory limit specified exceeds the max size") + } + + info, err := job.getExtendedInformation() + if err != nil { + return err + } + + info.JobMemoryLimit = uintptr(memoryLimitInBytes) + info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_JOB_MEMORY + return job.setExtendedInformation(info) +} + +// GetMemoryLimit gets the memory limit in bytes of the job object. 
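A sketch of how a consumer might combine the setters above: SetTerminateOnLastHandleClose for cleanup semantics, then SetResourceLimits with the JobLimits fields this file consumes. CPULimit and CPUWeight are alternatives (CPULimit wins if both are non-zero); the concrete numbers below are arbitrary examples, and the internal-import caveat from the earlier sketches applies.

package example

import "github.com/Microsoft/hcsshim/internal/jobobject"

func applyDefaultLimits(job *jobobject.JobObject) error {
	// Tear down every process in the job when the last handle to it is closed.
	if err := job.SetTerminateOnLastHandleClose(); err != nil {
		return err
	}
	return job.SetResourceLimits(&jobobject.JobLimits{
		MemoryLimitInBytes: 512 * 1024 * 1024, // cap committed memory for the whole job at 512 MiB
		CPUWeight:          5,                 // weight-based sharing; Windows job weights run 1 through 9
		MaxIOPS:            1000,              // cap I/O operations per second
	})
}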
+func (job *JobObject) GetMemoryLimit() (uint64, error) { + info, err := job.getExtendedInformation() + if err != nil { + return 0, err + } + return uint64(info.JobMemoryLimit), nil +} + +// SetCPULimit sets the CPU limit depending on the specified `CPURateControlType` to +// `rateControlValue` for the job object. +func (job *JobObject) SetCPULimit(rateControlType CPURateControlType, rateControlValue uint32) error { + cpuInfo, err := job.getCPURateControlInformation() + if err != nil { + return err + } + switch rateControlType { + case WeightBased: + if rateControlValue < cpuWeightMin || rateControlValue > cpuWeightMax { + return fmt.Errorf("processor weight value of `%d` is invalid", rateControlValue) + } + cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED + cpuInfo.Value = rateControlValue + case RateBased: + if rateControlValue < cpuLimitMin || rateControlValue > cpuLimitMax { + return fmt.Errorf("processor rate of `%d` is invalid", rateControlValue) + } + cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP + cpuInfo.Value = rateControlValue + default: + return errors.New("invalid job object cpu rate control type") + } + return job.setCPURateControlInfo(cpuInfo) +} + +// GetCPULimit gets the cpu limits for the job object. +// `rateControlType` is used to indicate what type of cpu limit to query for. +func (job *JobObject) GetCPULimit(rateControlType CPURateControlType) (uint32, error) { + info, err := job.getCPURateControlInformation() + if err != nil { + return 0, err + } + + if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE, info.ControlFlags) { + return 0, errors.New("the job does not have cpu rate control enabled") + } + + switch rateControlType { + case WeightBased: + if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED, info.ControlFlags) { + return 0, errors.New("cannot get cpu weight for job object without cpu weight option set") + } + case RateBased: + if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP, info.ControlFlags) { + return 0, errors.New("cannot get cpu rate hard cap for job object without cpu rate hard cap option set") + } + default: + return 0, errors.New("invalid job object cpu rate control type") + } + return info.Value, nil +} + +// SetCPUAffinity sets the processor affinity for the job object. +// The affinity is passed in as a bitmask. +func (job *JobObject) SetCPUAffinity(affinityBitMask uint64) error { + info, err := job.getExtendedInformation() + if err != nil { + return err + } + info.BasicLimitInformation.LimitFlags |= uint32(windows.JOB_OBJECT_LIMIT_AFFINITY) + info.BasicLimitInformation.Affinity = uintptr(affinityBitMask) + return job.setExtendedInformation(info) +} + +// GetCPUAffinity gets the processor affinity for the job object. +// The returned affinity is a bitmask. +func (job *JobObject) GetCPUAffinity() (uint64, error) { + info, err := job.getExtendedInformation() + if err != nil { + return 0, err + } + return uint64(info.BasicLimitInformation.Affinity), nil +} + +// SetIOLimit sets the IO limits specified on the job object. 
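Two pure helpers that make the numeric arguments above less opaque: SetCPUAffinity takes a bitmask of logical processors, and a rate-based CPU limit is expressed on the scale Windows uses for JOBOBJECT_CPU_RATE_CONTROL_INFORMATION, i.e. cycles per 10,000 (a percentage multiplied by 100). Both helpers are illustrative sketches, not part of the vendored package.

// affinityMask builds the bitmask SetCPUAffinity expects from logical
// processor indices; affinityMask(0, 1) pins the job to CPUs 0 and 1.
func affinityMask(cpus ...uint) uint64 {
	var mask uint64
	for _, cpu := range cpus {
		if cpu < 64 { // one mask addresses at most 64 logical processors
			mask |= 1 << cpu
		}
	}
	return mask
}

// rateFromPercent converts a whole-number CPU percentage into the
// cycles-per-10,000 value expected by SetCPULimit(RateBased, ...).
func rateFromPercent(percent uint32) uint32 {
	return percent * 100
}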
+func (job *JobObject) SetIOLimit(maxBandwidth, maxIOPS int64) error { + ioInfo, err := job.getIOLimit() + if err != nil { + return err + } + ioInfo.ControlFlags |= winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE + if maxBandwidth != 0 { + ioInfo.MaxBandwidth = maxBandwidth + } + if maxIOPS != 0 { + ioInfo.MaxIops = maxIOPS + } + return job.setIORateControlInfo(ioInfo) +} + +// GetIOMaxBandwidthLimit gets the max bandwidth for the job object. +func (job *JobObject) GetIOMaxBandwidthLimit() (int64, error) { + info, err := job.getIOLimit() + if err != nil { + return 0, err + } + return info.MaxBandwidth, nil +} + +// GetIOMaxIopsLimit gets the max iops for the job object. +func (job *JobObject) GetIOMaxIopsLimit() (int64, error) { + info, err := job.getIOLimit() + if err != nil { + return 0, err + } + return info.MaxIops, nil +} + +// Helper function for getting a job object's extended information. +func (job *JobObject) getExtendedInformation() (*windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + info := windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION{} + if err := winapi.QueryInformationJobObject( + job.handle, + windows.JobObjectExtendedLimitInformation, + unsafe.Pointer(&info), + uint32(unsafe.Sizeof(info)), + nil, + ); err != nil { + return nil, fmt.Errorf("query %v returned error: %w", info, err) + } + return &info, nil +} + +// Helper function for getting a job object's CPU rate control information. +func (job *JobObject) getCPURateControlInformation() (*winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + info := winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION{} + if err := winapi.QueryInformationJobObject( + job.handle, + windows.JobObjectCpuRateControlInformation, + unsafe.Pointer(&info), + uint32(unsafe.Sizeof(info)), + nil, + ); err != nil { + return nil, fmt.Errorf("query %v returned error: %w", info, err) + } + return &info, nil +} + +// Helper function for setting a job object's extended information. +func (job *JobObject) setExtendedInformation(info *windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION) error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + + if _, err := windows.SetInformationJobObject( + job.handle, + windows.JobObjectExtendedLimitInformation, + uintptr(unsafe.Pointer(info)), + uint32(unsafe.Sizeof(*info)), + ); err != nil { + return fmt.Errorf("failed to set Extended info %v on job object: %w", info, err) + } + return nil +} + +// Helper function for querying job handle for IO limit information. 
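For readers unfamiliar with the underlying Win32 calls, this is roughly what setExtendedInformation boils down to: a minimal sketch that sets only the kill-on-close flag on an existing job handle via golang.org/x/sys/windows. Unlike the helper above it does not query the current limits first, so it would clobber any flags already set; preserving them is exactly why getExtendedInformation and setExtendedInformation exist. Windows-only, and the function name is a placeholder.

package example

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

func setKillOnJobClose(job windows.Handle) error {
	info := windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION{}
	info.BasicLimitInformation.LimitFlags = windows.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
	if _, err := windows.SetInformationJobObject(
		job,
		windows.JobObjectExtendedLimitInformation,
		uintptr(unsafe.Pointer(&info)),
		uint32(unsafe.Sizeof(info)),
	); err != nil {
		return fmt.Errorf("failed to set kill-on-close on job object: %w", err)
	}
	return nil
}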
+func (job *JobObject) getIOLimit() (*winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + ioInfo := &winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION{} + var blockCount uint32 = 1 + + if _, err := winapi.QueryIoRateControlInformationJobObject( + job.handle, + nil, + &ioInfo, + &blockCount, + ); err != nil { + return nil, fmt.Errorf("query %v returned error: %w", ioInfo, err) + } + + if !isFlagSet(winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE, ioInfo.ControlFlags) { + return nil, fmt.Errorf("query %v cannot get IO limits for job object without IO rate control option set", ioInfo) + } + return ioInfo, nil +} + +// Helper function for setting a job object's IO rate control information. +func (job *JobObject) setIORateControlInfo(ioInfo *winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION) error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + + if _, err := winapi.SetIoRateControlInformationJobObject(job.handle, ioInfo); err != nil { + return fmt.Errorf("failed to set IO limit info %v on job object: %w", ioInfo, err) + } + return nil +} + +// Helper function for setting a job object's CPU rate control information. +func (job *JobObject) setCPURateControlInfo(cpuInfo *winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION) error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + if _, err := windows.SetInformationJobObject( + job.handle, + windows.JobObjectCpuRateControlInformation, + uintptr(unsafe.Pointer(cpuInfo)), + uint32(unsafe.Sizeof(cpuInfo)), + ); err != nil { + return fmt.Errorf("failed to set cpu limit info %v on job object: %w", cpuInfo, err) + } + return nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/log/g.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/log/g.go new file mode 100644 index 00000000000..ba6b1a4a53a --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/log/g.go @@ -0,0 +1,23 @@ +package log + +import ( + "context" + + "github.com/sirupsen/logrus" + "go.opencensus.io/trace" +) + +// G returns a `logrus.Entry` with the `TraceID, SpanID` from `ctx` if `ctx` +// contains an OpenCensus `trace.Span`. +func G(ctx context.Context) *logrus.Entry { + span := trace.FromContext(ctx) + if span != nil { + sctx := span.SpanContext() + return logrus.WithFields(logrus.Fields{ + "traceID": sctx.TraceID.String(), + "spanID": sctx.SpanID.String(), + // "parentSpanID": TODO: JTERRY75 - Try to convince OC to export this? + }) + } + return logrus.NewEntry(logrus.StandardLogger()) +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go new file mode 100644 index 00000000000..cf2c166d9b8 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go @@ -0,0 +1,32 @@ +package logfields + +const ( + // Identifiers + + ContainerID = "cid" + UVMID = "uvm-id" + ProcessID = "pid" + + // Common Misc + + // Timeout represents an operation timeout. 
+ Timeout = "timeout" + JSON = "json" + + // Keys/values + + Field = "field" + OCIAnnotation = "oci-annotation" + Value = "value" + + // Golang type's + + ExpectedType = "expected-type" + Bool = "bool" + Uint32 = "uint32" + Uint64 = "uint64" + + // runhcs + + VMShimOperation = "vmshim-op" +) diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go new file mode 100644 index 00000000000..f428bdaf720 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go @@ -0,0 +1,43 @@ +package oc + +import ( + "github.com/sirupsen/logrus" + "go.opencensus.io/trace" +) + +var _ = (trace.Exporter)(&LogrusExporter{}) + +// LogrusExporter is an OpenCensus `trace.Exporter` that exports +// `trace.SpanData` to logrus output. +type LogrusExporter struct { +} + +// ExportSpan exports `s` based on the the following rules: +// +// 1. All output will contain `s.Attributes`, `s.TraceID`, `s.SpanID`, +// `s.ParentSpanID` for correlation +// +// 2. Any calls to .Annotate will not be supported. +// +// 3. The span itself will be written at `logrus.InfoLevel` unless +// `s.Status.Code != 0` in which case it will be written at `logrus.ErrorLevel` +// providing `s.Status.Message` as the error value. +func (le *LogrusExporter) ExportSpan(s *trace.SpanData) { + // Combine all span annotations with traceID, spanID, parentSpanID + baseEntry := logrus.WithFields(logrus.Fields(s.Attributes)) + baseEntry.Data["traceID"] = s.TraceID.String() + baseEntry.Data["spanID"] = s.SpanID.String() + baseEntry.Data["parentSpanID"] = s.ParentSpanID.String() + baseEntry.Data["startTime"] = s.StartTime + baseEntry.Data["endTime"] = s.EndTime + baseEntry.Data["duration"] = s.EndTime.Sub(s.StartTime).String() + baseEntry.Data["name"] = s.Name + baseEntry.Time = s.StartTime + + level := logrus.InfoLevel + if s.Status.Code != 0 { + level = logrus.ErrorLevel + baseEntry.Data[logrus.ErrorKey] = s.Status.Message + } + baseEntry.Log(level, "Span") +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go new file mode 100644 index 00000000000..fee4765cbc4 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go @@ -0,0 +1,17 @@ +package oc + +import ( + "go.opencensus.io/trace" +) + +// SetSpanStatus sets `span.SetStatus` to the proper status depending on `err`. If +// `err` is `nil` assumes `trace.StatusCodeOk`. +func SetSpanStatus(span *trace.Span, err error) { + status := trace.Status{} + if err != nil { + // TODO: JTERRY75 - Handle errors in a non-generic way + status.Code = trace.StatusCodeUnknown + status.Message = err.Error() + } + span.SetStatus(status) +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go new file mode 100644 index 00000000000..4eb9bb9f1f3 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go @@ -0,0 +1,92 @@ +package queue + +import ( + "errors" + "sync" +) + +var ErrQueueClosed = errors.New("the queue is closed for reading and writing") + +// MessageQueue represents a threadsafe message queue to be used to retrieve or +// write messages to. +type MessageQueue struct { + m *sync.RWMutex + c *sync.Cond + messages []interface{} + closed bool +} + +// NewMessageQueue returns a new MessageQueue. 
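A consumer-side sketch for the queue defined below: Dequeue blocks until a message arrives or Close is called, at which point it returns ErrQueueClosed, which doubles as the normal shutdown signal. The package name and the drain/handle identifiers are placeholders, and the import path, being internal, only resolves inside the hcsshim module.

package example

import (
	"errors"

	"github.com/Microsoft/hcsshim/internal/queue"
)

// drain processes messages until the producer closes the queue.
func drain(mq *queue.MessageQueue, handle func(interface{})) error {
	for {
		msg, err := mq.Dequeue()
		if errors.Is(err, queue.ErrQueueClosed) {
			return nil // producer closed the queue; clean shutdown
		}
		if err != nil {
			return err
		}
		handle(msg)
	}
}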
+func NewMessageQueue() *MessageQueue { + m := &sync.RWMutex{} + return &MessageQueue{ + m: m, + c: sync.NewCond(m), + messages: []interface{}{}, + } +} + +// Enqueue writes `msg` to the queue. +func (mq *MessageQueue) Enqueue(msg interface{}) error { + mq.m.Lock() + defer mq.m.Unlock() + + if mq.closed { + return ErrQueueClosed + } + mq.messages = append(mq.messages, msg) + // Signal a waiter that there is now a value available in the queue. + mq.c.Signal() + return nil +} + +// Dequeue will read a value from the queue and remove it. If the queue +// is empty, this will block until the queue is closed or a value gets enqueued. +func (mq *MessageQueue) Dequeue() (interface{}, error) { + mq.m.Lock() + defer mq.m.Unlock() + + for !mq.closed && mq.size() == 0 { + mq.c.Wait() + } + + // We got woken up, check if it's because the queue got closed. + if mq.closed { + return nil, ErrQueueClosed + } + + val := mq.messages[0] + mq.messages[0] = nil + mq.messages = mq.messages[1:] + return val, nil +} + +// Size returns the size of the queue. +func (mq *MessageQueue) Size() int { + mq.m.RLock() + defer mq.m.RUnlock() + return mq.size() +} + +// Nonexported size check to check if the queue is empty inside already locked functions. +func (mq *MessageQueue) size() int { + return len(mq.messages) +} + +// Close closes the queue for future writes or reads. Any attempts to read or write from the +// queue after close will return ErrQueueClosed. This is safe to call multiple times. +func (mq *MessageQueue) Close() { + mq.m.Lock() + defer mq.m.Unlock() + + // Already closed, noop + if mq.closed { + return + } + + mq.messages = nil + mq.closed = true + // If there's anybody currently waiting on a value from Dequeue, we need to + // broadcast so the read(s) can return ErrQueueClosed. 
+ mq.c.Broadcast() +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go new file mode 100644 index 00000000000..dcbc9334d76 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go @@ -0,0 +1,288 @@ +package regstate + +import ( + "encoding/json" + "fmt" + "net/url" + "os" + "path/filepath" + "reflect" + "syscall" + + "golang.org/x/sys/windows" + "golang.org/x/sys/windows/registry" +) + +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go regstate.go + +//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW + +const ( + _REG_OPTION_VOLATILE = 1 + + _REG_OPENED_EXISTING_KEY = 2 +) + +type Key struct { + registry.Key + Name string +} + +var localMachine = &Key{registry.LOCAL_MACHINE, "HKEY_LOCAL_MACHINE"} +var localUser = &Key{registry.CURRENT_USER, "HKEY_CURRENT_USER"} + +var rootPath = `SOFTWARE\Microsoft\runhcs` + +type NotFoundError struct { + ID string +} + +func (err *NotFoundError) Error() string { + return fmt.Sprintf("ID '%s' was not found", err.ID) +} + +func IsNotFoundError(err error) bool { + _, ok := err.(*NotFoundError) + return ok +} + +type NoStateError struct { + ID string + Key string +} + +func (err *NoStateError) Error() string { + return fmt.Sprintf("state '%s' is not present for ID '%s'", err.Key, err.ID) +} + +func createVolatileKey(k *Key, path string, access uint32) (newk *Key, openedExisting bool, err error) { + var ( + h syscall.Handle + d uint32 + ) + fullpath := filepath.Join(k.Name, path) + pathPtr, _ := windows.UTF16PtrFromString(path) + err = regCreateKeyEx(syscall.Handle(k.Key), pathPtr, 0, nil, _REG_OPTION_VOLATILE, access, nil, &h, &d) + if err != nil { + return nil, false, &os.PathError{Op: "RegCreateKeyEx", Path: fullpath, Err: err} + } + return &Key{registry.Key(h), fullpath}, d == _REG_OPENED_EXISTING_KEY, nil +} + +func hive(perUser bool) *Key { + r := localMachine + if perUser { + r = localUser + } + return r +} + +func Open(root string, perUser bool) (*Key, error) { + k, _, err := createVolatileKey(hive(perUser), rootPath, registry.ALL_ACCESS) + if err != nil { + return nil, err + } + defer k.Close() + + k2, _, err := createVolatileKey(k, url.PathEscape(root), registry.ALL_ACCESS) + if err != nil { + return nil, err + } + return k2, nil +} + +func RemoveAll(root string, perUser bool) error { + k, err := hive(perUser).open(rootPath) + if err != nil { + return err + } + defer k.Close() + r, err := k.open(url.PathEscape(root)) + if err != nil { + return err + } + defer r.Close() + ids, err := r.Enumerate() + if err != nil { + return err + } + for _, id := range ids { + err = r.Remove(id) + if err != nil { + return err + } + } + r.Close() + return k.Remove(root) +} + +func (k *Key) Close() error { + err := k.Key.Close() + k.Key = 0 + return err +} + +func (k *Key) Enumerate() ([]string, error) { + escapedIDs, err := k.ReadSubKeyNames(0) + if err != nil { + return nil, err + } + var ids []string + for _, e := range escapedIDs { + id, err := url.PathUnescape(e) + if err == nil { + ids = append(ids, id) + } + } + return ids, nil +} + +func (k *Key) open(name string) (*Key, error) { + fullpath := filepath.Join(k.Name, name) + nk, err := registry.OpenKey(k.Key, name, 
registry.ALL_ACCESS) + if err != nil { + return nil, &os.PathError{Op: "RegOpenKey", Path: fullpath, Err: err} + } + return &Key{nk, fullpath}, nil +} + +func (k *Key) openid(id string) (*Key, error) { + escaped := url.PathEscape(id) + fullpath := filepath.Join(k.Name, escaped) + nk, err := k.open(escaped) + if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ERROR_FILE_NOT_FOUND { + return nil, &NotFoundError{id} + } + if err != nil { + return nil, &os.PathError{Op: "RegOpenKey", Path: fullpath, Err: err} + } + return nk, nil +} + +func (k *Key) Remove(id string) error { + escaped := url.PathEscape(id) + err := registry.DeleteKey(k.Key, escaped) + if err != nil { + if err == syscall.ERROR_FILE_NOT_FOUND { + return &NotFoundError{id} + } + return &os.PathError{Op: "RegDeleteKey", Path: filepath.Join(k.Name, escaped), Err: err} + } + return nil +} + +func (k *Key) set(id string, create bool, key string, state interface{}) error { + var sk *Key + var err error + if create { + var existing bool + eid := url.PathEscape(id) + sk, existing, err = createVolatileKey(k, eid, registry.ALL_ACCESS) + if err != nil { + return err + } + defer sk.Close() + if existing { + sk.Close() + return fmt.Errorf("container %s already exists", id) + } + } else { + sk, err = k.openid(id) + if err != nil { + return err + } + defer sk.Close() + } + switch reflect.TypeOf(state).Kind() { + case reflect.Bool: + v := uint32(0) + if state.(bool) { + v = 1 + } + err = sk.SetDWordValue(key, v) + case reflect.Int: + err = sk.SetQWordValue(key, uint64(state.(int))) + case reflect.String: + err = sk.SetStringValue(key, state.(string)) + default: + var js []byte + js, err = json.Marshal(state) + if err != nil { + return err + } + err = sk.SetBinaryValue(key, js) + } + if err != nil { + if err == syscall.ERROR_FILE_NOT_FOUND { + return &NoStateError{id, key} + } + return &os.PathError{Op: "RegSetValueEx", Path: sk.Name + ":" + key, Err: err} + } + return nil +} + +func (k *Key) Create(id, key string, state interface{}) error { + return k.set(id, true, key, state) +} + +func (k *Key) Set(id, key string, state interface{}) error { + return k.set(id, false, key, state) +} + +func (k *Key) Clear(id, key string) error { + sk, err := k.openid(id) + if err != nil { + return err + } + defer sk.Close() + err = sk.DeleteValue(key) + if err != nil { + if err == syscall.ERROR_FILE_NOT_FOUND { + return &NoStateError{id, key} + } + return &os.PathError{Op: "RegDeleteValue", Path: sk.Name + ":" + key, Err: err} + } + return nil +} + +func (k *Key) Get(id, key string, state interface{}) error { + sk, err := k.openid(id) + if err != nil { + return err + } + defer sk.Close() + + var js []byte + switch reflect.TypeOf(state).Elem().Kind() { + case reflect.Bool: + var v uint64 + v, _, err = sk.GetIntegerValue(key) + if err == nil { + *state.(*bool) = v != 0 + } + case reflect.Int: + var v uint64 + v, _, err = sk.GetIntegerValue(key) + if err == nil { + *state.(*int) = int(v) + } + case reflect.String: + var v string + v, _, err = sk.GetStringValue(key) + if err == nil { + *state.(*string) = string(v) + } + default: + js, _, err = sk.GetBinaryValue(key) + } + if err != nil { + if err == syscall.ERROR_FILE_NOT_FOUND { + return &NoStateError{id, key} + } + return &os.PathError{Op: "RegQueryValueEx", Path: sk.Name + ":" + key, Err: err} + } + if js != nil { + err = json.Unmarshal(js, state) + } + return err +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go 
b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go new file mode 100644 index 00000000000..4e349ad4984 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go @@ -0,0 +1,51 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package regstate + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + + procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") +) + +func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go new file mode 100644 index 00000000000..a161c204e2f --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go @@ -0,0 +1,71 @@ +package runhcs + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "syscall" + "time" + + "github.com/Microsoft/go-winio/pkg/guid" +) + +// ContainerState represents the platform agnostic pieces relating to a +// running container's status and state +type ContainerState struct { + // Version is the OCI version for the container + Version string `json:"ociVersion"` + // ID is the container ID + ID string `json:"id"` + // InitProcessPid is the init process id in the parent namespace + InitProcessPid int `json:"pid"` + // Status is the current status of the container, running, paused, ... + Status string `json:"status"` + // Bundle is the path on the filesystem to the bundle + Bundle string `json:"bundle"` + // Rootfs is a path to a directory containing the container's root filesystem. + Rootfs string `json:"rootfs"` + // Created is the unix timestamp for the creation time of the container in UTC + Created time.Time `json:"created"` + // Annotations is the user defined annotations added to the config. + Annotations map[string]string `json:"annotations,omitempty"` + // The owner of the state directory (the owner of the container). + Owner string `json:"owner"` +} + +// GetErrorFromPipe returns reads from `pipe` and verifies if the operation +// returned success or error. If error converts that to an error and returns. If +// `p` is not nill will issue a `Kill` and `Wait` for exit. 
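The reader below implies a simple wire protocol; sketched from the writer's side (as if it lived in this runhcs package so it can reference the ShimSuccess sentinel from util.go directly), a shim signals success by writing ShimSuccess and failure by writing the error text, then closes the pipe so the client's read completes. replyOnPipe is an illustrative name, not part of the vendored file.

func replyOnPipe(w io.Writer, opErr error) error {
	if opErr == nil {
		// Success: write the 4-byte sentinel the client compares against.
		_, err := w.Write(ShimSuccess)
		return err
	}
	// Failure: the raw error text becomes the error the client returns.
	_, err := io.WriteString(w, opErr.Error())
	return err
}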
+func GetErrorFromPipe(pipe io.Reader, p *os.Process) error { + serr, err := ioutil.ReadAll(pipe) + if err != nil { + return err + } + + if bytes.Equal(serr, ShimSuccess) { + return nil + } + + extra := "" + if p != nil { + _ = p.Kill() + state, err := p.Wait() + if err != nil { + panic(err) + } + extra = fmt.Sprintf(", exit code %d", state.Sys().(syscall.WaitStatus).ExitCode) + } + if len(serr) == 0 { + return fmt.Errorf("unknown shim failure%s", extra) + } + + return errors.New(string(serr)) +} + +// VMPipePath returns the named pipe path for the vm shim. +func VMPipePath(hostUniqueID guid.GUID) string { + return SafePipePath("runhcs-vm-" + hostUniqueID.String()) +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/runhcs/util.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/runhcs/util.go new file mode 100644 index 00000000000..dcbb1903b8f --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/runhcs/util.go @@ -0,0 +1,16 @@ +package runhcs + +import "net/url" + +const ( + SafePipePrefix = `\\.\pipe\ProtectedPrefix\Administrators\` +) + +// ShimSuccess is the byte stream returned on a successful operation. +var ShimSuccess = []byte{0, 'O', 'K', 0} + +func SafePipePath(name string) string { + // Use a pipe in the Administrators protected prefixed to prevent malicious + // squatting. + return SafePipePrefix + url.PathEscape(name) +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go new file mode 100644 index 00000000000..2c8957b88df --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go @@ -0,0 +1,43 @@ +package runhcs + +import ( + "encoding/json" + + "github.com/Microsoft/go-winio" +) + +// VMRequestOp is an operation that can be issued to a VM shim. +type VMRequestOp string + +const ( + // OpCreateContainer is a create container request. + OpCreateContainer VMRequestOp = "create" + // OpSyncNamespace is a `cni.NamespaceTypeGuest` sync request with the UVM. + OpSyncNamespace VMRequestOp = "sync" + // OpUnmountContainer is a container unmount request. + OpUnmountContainer VMRequestOp = "unmount" + // OpUnmountContainerDiskOnly is a container unmount disk request. + OpUnmountContainerDiskOnly VMRequestOp = "unmount-disk" +) + +// VMRequest is an operation request that is issued to a VM shim. +type VMRequest struct { + ID string + Op VMRequestOp +} + +// IssueVMRequest issues a request to a shim at the given pipe. +func IssueVMRequest(pipepath string, req *VMRequest) error { + pipe, err := winio.DialPipe(pipepath, nil) + if err != nil { + return err + } + defer pipe.Close() + if err := json.NewEncoder(pipe).Encode(req); err != nil { + return err + } + if err := GetErrorFromPipe(pipe, nil); err != nil { + return err + } + return nil +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go new file mode 100644 index 00000000000..eaf39fa5132 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go @@ -0,0 +1,74 @@ +package timeout + +import ( + "os" + "strconv" + "time" +) + +var ( + // defaultTimeout is the timeout for most operations that is not overridden. + defaultTimeout = 4 * time.Minute + + // defaultTimeoutTestdRetry is the retry loop timeout for testd to respond + // for a disk to come online in LCOW. 
+ defaultTimeoutTestdRetry = 5 * time.Second +) + +// External variables for HCSShim consumers to use. +var ( + // SystemCreate is the timeout for creating a compute system + SystemCreate time.Duration = defaultTimeout + + // SystemStart is the timeout for starting a compute system + SystemStart time.Duration = defaultTimeout + + // SystemPause is the timeout for pausing a compute system + SystemPause time.Duration = defaultTimeout + + // SystemResume is the timeout for resuming a compute system + SystemResume time.Duration = defaultTimeout + + // SystemSave is the timeout for saving a compute system + SystemSave time.Duration = defaultTimeout + + // SyscallWatcher is the timeout before warning of a potential stuck platform syscall. + SyscallWatcher time.Duration = defaultTimeout + + // Tar2VHD is the timeout for the tar2vhd operation to complete + Tar2VHD time.Duration = defaultTimeout + + // ExternalCommandToStart is the timeout for external commands to start + ExternalCommandToStart = defaultTimeout + + // ExternalCommandToComplete is the timeout for external commands to complete. + // Generally this means copying data from their stdio pipes. + ExternalCommandToComplete = defaultTimeout + + // TestDRetryLoop is the timeout for testd retry loop when onlining a SCSI disk in LCOW + TestDRetryLoop = defaultTimeoutTestdRetry +) + +func init() { + SystemCreate = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMCREATE", SystemCreate) + SystemStart = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMSTART", SystemStart) + SystemPause = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMPAUSE", SystemPause) + SystemResume = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMRESUME", SystemResume) + SystemSave = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMSAVE", SystemSave) + SyscallWatcher = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSCALLWATCHER", SyscallWatcher) + Tar2VHD = durationFromEnvironment("HCSSHIM_TIMEOUT_TAR2VHD", Tar2VHD) + ExternalCommandToStart = durationFromEnvironment("HCSSHIM_TIMEOUT_EXTERNALCOMMANDSTART", ExternalCommandToStart) + ExternalCommandToComplete = durationFromEnvironment("HCSSHIM_TIMEOUT_EXTERNALCOMMANDCOMPLETE", ExternalCommandToComplete) + TestDRetryLoop = durationFromEnvironment("HCSSHIM_TIMEOUT_TESTDRETRYLOOP", TestDRetryLoop) +} + +func durationFromEnvironment(env string, defaultValue time.Duration) time.Duration { + envTimeout := os.Getenv(env) + if len(envTimeout) > 0 { + e, err := strconv.Atoi(envTimeout) + if err == nil && e > 0 { + return time.Second * time.Duration(e) + } + } + return defaultValue +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go new file mode 100644 index 00000000000..e7f114b67aa --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go @@ -0,0 +1,610 @@ +package vmcompute + +import ( + gcontext "context" + "syscall" + "time" + + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/internal/timeout" + "go.opencensus.io/trace" +) + +//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go vmcompute.go + +//sys hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) = vmcompute.HcsEnumerateComputeSystems? 
+//sys hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem? +//sys hcsOpenComputeSystem(id string, computeSystem *HcsSystem, result **uint16) (hr error) = vmcompute.HcsOpenComputeSystem? +//sys hcsCloseComputeSystem(computeSystem HcsSystem) (hr error) = vmcompute.HcsCloseComputeSystem? +//sys hcsStartComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem? +//sys hcsShutdownComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem? +//sys hcsTerminateComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem? +//sys hcsPauseComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem? +//sys hcsResumeComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem? +//sys hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetComputeSystemProperties? +//sys hcsModifyComputeSystem(computeSystem HcsSystem, configuration string, result **uint16) (hr error) = vmcompute.HcsModifyComputeSystem? +//sys hcsModifyServiceSettings(settings string, result **uint16) (hr error) = vmcompute.HcsModifyServiceSettings? +//sys hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) = vmcompute.HcsRegisterComputeSystemCallback? +//sys hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) = vmcompute.HcsUnregisterComputeSystemCallback? +//sys hcsSaveComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsSaveComputeSystem? + +//sys hcsCreateProcess(computeSystem HcsSystem, processParameters string, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) = vmcompute.HcsCreateProcess? +//sys hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess? +//sys hcsCloseProcess(process HcsProcess) (hr error) = vmcompute.HcsCloseProcess? +//sys hcsTerminateProcess(process HcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess? +//sys hcsSignalProcess(process HcsProcess, options string, result **uint16) (hr error) = vmcompute.HcsSignalProcess? +//sys hcsGetProcessInfo(process HcsProcess, processInformation *HcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo? +//sys hcsGetProcessProperties(process HcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties? +//sys hcsModifyProcess(process HcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess? +//sys hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetServiceProperties? +//sys hcsRegisterProcessCallback(process HcsProcess, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) = vmcompute.HcsRegisterProcessCallback? +//sys hcsUnregisterProcessCallback(callbackHandle HcsCallback) (hr error) = vmcompute.HcsUnregisterProcessCallback? 
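Two notes on the declarations above. The trailing `?` on each //sys line makes the generated wrapper look the export up lazily and return the Find() error instead of panicking if vmcompute.dll does not provide it, which is visible in the generated zsyscall_windows.go later in this patch. Every exported wrapper in this file also funnels the raw call through a timeout guard; a generic sketch of that guard follows, mirroring the execute helper defined just below (the names here are placeholders).

package example

import (
	"context"
	"time"
)

// runWithTimeout runs a blocking platform call in its own goroutine and bounds
// the wait with a context deadline, so a wedged syscall cannot hang the caller.
func runWithTimeout(ctx context.Context, d time.Duration, f func() error) error {
	if d > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, d)
		defer cancel()
	}
	done := make(chan error, 1) // buffered so the goroutine can always send and exit
	go func() { done <- f() }()
	select {
	case <-ctx.Done():
		return ctx.Err() // note: f keeps running; only the wait on it is abandoned
	case err := <-done:
		return err
	}
}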
+ +// errVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously +const errVmcomputeOperationPending = syscall.Errno(0xC0370103) + +// HcsSystem is the handle associated with a created compute system. +type HcsSystem syscall.Handle + +// HcsProcess is the handle associated with a created process in a compute +// system. +type HcsProcess syscall.Handle + +// HcsCallback is the handle associated with the function to call when events +// occur. +type HcsCallback syscall.Handle + +// HcsProcessInformation is the structure used when creating or getting process +// info. +type HcsProcessInformation struct { + // ProcessId is the pid of the created process. + ProcessId uint32 + reserved uint32 //nolint:structcheck + // StdInput is the handle associated with the stdin of the process. + StdInput syscall.Handle + // StdOutput is the handle associated with the stdout of the process. + StdOutput syscall.Handle + // StdError is the handle associated with the stderr of the process. + StdError syscall.Handle +} + +func execute(ctx gcontext.Context, timeout time.Duration, f func() error) error { + if timeout > 0 { + var cancel gcontext.CancelFunc + ctx, cancel = gcontext.WithTimeout(ctx, timeout) + defer cancel() + } + + done := make(chan error, 1) + go func() { + done <- f() + }() + select { + case <-ctx.Done(): + if ctx.Err() == gcontext.DeadlineExceeded { + log.G(ctx).WithField(logfields.Timeout, timeout). + Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. If it appears to be making no forward progress, obtain the stacks and see if there is a syscall stuck in the platform API for a significant length of time.") + } + return ctx.Err() + case err := <-done: + return err + } +} + +func HcsEnumerateComputeSystems(ctx gcontext.Context, query string) (computeSystems, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsEnumerateComputeSystems") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("query", query)) + + return computeSystems, result, execute(ctx, timeout.SyscallWatcher, func() error { + var ( + computeSystemsp *uint16 + resultp *uint16 + ) + err := hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp) + if computeSystemsp != nil { + computeSystems = interop.ConvertAndFreeCoTaskMemString(computeSystemsp) + } + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsCreateComputeSystem(ctx gcontext.Context, id string, configuration string, identity syscall.Handle) (computeSystem HcsSystem, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsCreateComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes( + trace.StringAttribute("id", id), + trace.StringAttribute("configuration", configuration)) + + return computeSystem, result, execute(ctx, timeout.SystemCreate, func() error { + var resultp *uint16 + err := hcsCreateComputeSystem(id, configuration, identity, &computeSystem, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsOpenComputeSystem(ctx gcontext.Context, id string) (computeSystem HcsSystem, result string, 
hr error) { + ctx, span := trace.StartSpan(ctx, "HcsOpenComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + + return computeSystem, result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsOpenComputeSystem(id, &computeSystem, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsCloseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem) (hr error) { + ctx, span := trace.StartSpan(ctx, "HcsCloseComputeSystem") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return execute(ctx, timeout.SyscallWatcher, func() error { + return hcsCloseComputeSystem(computeSystem) + }) +} + +func HcsStartComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsStartComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SystemStart, func() error { + var resultp *uint16 + err := hcsStartComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsShutdownComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsShutdownComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsShutdownComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsTerminateComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsTerminateComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsTerminateComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsPauseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsPauseComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SystemPause, func() error { + var resultp *uint16 + err := 
hcsPauseComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsResumeComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsResumeComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SystemResume, func() error { + var resultp *uint16 + err := hcsResumeComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsGetComputeSystemProperties(ctx gcontext.Context, computeSystem HcsSystem, propertyQuery string) (properties, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsGetComputeSystemProperties") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("propertyQuery", propertyQuery)) + + return properties, result, execute(ctx, timeout.SyscallWatcher, func() error { + var ( + propertiesp *uint16 + resultp *uint16 + ) + err := hcsGetComputeSystemProperties(computeSystem, propertyQuery, &propertiesp, &resultp) + if propertiesp != nil { + properties = interop.ConvertAndFreeCoTaskMemString(propertiesp) + } + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsModifyComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, configuration string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsModifyComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("configuration", configuration)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsModifyComputeSystem(computeSystem, configuration, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsModifyServiceSettings(ctx gcontext.Context, settings string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsModifyServiceSettings") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("settings", settings)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsModifyServiceSettings(settings, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsRegisterComputeSystemCallback(ctx gcontext.Context, computeSystem HcsSystem, callback uintptr, context uintptr) (callbackHandle HcsCallback, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsRegisterComputeSystemCallback") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return callbackHandle, execute(ctx, timeout.SyscallWatcher, func() error { + return hcsRegisterComputeSystemCallback(computeSystem, callback, context, 
&callbackHandle) + }) +} + +func HcsUnregisterComputeSystemCallback(ctx gcontext.Context, callbackHandle HcsCallback) (hr error) { + ctx, span := trace.StartSpan(ctx, "HcsUnregisterComputeSystemCallback") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return execute(ctx, timeout.SyscallWatcher, func() error { + return hcsUnregisterComputeSystemCallback(callbackHandle) + }) +} + +func HcsCreateProcess(ctx gcontext.Context, computeSystem HcsSystem, processParameters string) (processInformation HcsProcessInformation, process HcsProcess, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsCreateProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("processParameters", processParameters)) + + return processInformation, process, result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsCreateProcess(computeSystem, processParameters, &processInformation, &process, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsOpenProcess(ctx gcontext.Context, computeSystem HcsSystem, pid uint32) (process HcsProcess, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsOpenProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.Int64Attribute("pid", int64(pid))) + + return process, result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsOpenProcess(computeSystem, pid, &process, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsCloseProcess(ctx gcontext.Context, process HcsProcess) (hr error) { + ctx, span := trace.StartSpan(ctx, "HcsCloseProcess") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return execute(ctx, timeout.SyscallWatcher, func() error { + return hcsCloseProcess(process) + }) +} + +func HcsTerminateProcess(ctx gcontext.Context, process HcsProcess) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsTerminateProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsTerminateProcess(process, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsSignalProcess(ctx gcontext.Context, process HcsProcess, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsSignalProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsSignalProcess(process, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsGetProcessInfo(ctx gcontext.Context, process HcsProcess) (processInformation HcsProcessInformation, result string, hr error) { + ctx, span := 
trace.StartSpan(ctx, "HcsGetProcessInfo") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + + return processInformation, result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsGetProcessInfo(process, &processInformation, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsGetProcessProperties(ctx gcontext.Context, process HcsProcess) (processProperties, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsGetProcessProperties") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + + return processProperties, result, execute(ctx, timeout.SyscallWatcher, func() error { + var ( + processPropertiesp *uint16 + resultp *uint16 + ) + err := hcsGetProcessProperties(process, &processPropertiesp, &resultp) + if processPropertiesp != nil { + processProperties = interop.ConvertAndFreeCoTaskMemString(processPropertiesp) + } + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsModifyProcess(ctx gcontext.Context, process HcsProcess, settings string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsModifyProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("settings", settings)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsModifyProcess(process, settings, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsGetServiceProperties(ctx gcontext.Context, propertyQuery string) (properties, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsGetServiceProperties") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("propertyQuery", propertyQuery)) + + return properties, result, execute(ctx, timeout.SyscallWatcher, func() error { + var ( + propertiesp *uint16 + resultp *uint16 + ) + err := hcsGetServiceProperties(propertyQuery, &propertiesp, &resultp) + if propertiesp != nil { + properties = interop.ConvertAndFreeCoTaskMemString(propertiesp) + } + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsRegisterProcessCallback(ctx gcontext.Context, process HcsProcess, callback uintptr, context uintptr) (callbackHandle HcsCallback, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsRegisterProcessCallback") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return callbackHandle, execute(ctx, timeout.SyscallWatcher, func() error { + return hcsRegisterProcessCallback(process, callback, context, &callbackHandle) + }) +} + +func HcsUnregisterProcessCallback(ctx gcontext.Context, callbackHandle HcsCallback) (hr error) { + ctx, span := trace.StartSpan(ctx, "HcsUnregisterProcessCallback") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return execute(ctx, timeout.SyscallWatcher, func() error { + return hcsUnregisterProcessCallback(callbackHandle) + }) +} + +func 
HcsSaveComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsSaveComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsSaveComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go new file mode 100644 index 00000000000..cae55058deb --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go @@ -0,0 +1,581 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package vmcompute + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") + + procHcsEnumerateComputeSystems = modvmcompute.NewProc("HcsEnumerateComputeSystems") + procHcsCreateComputeSystem = modvmcompute.NewProc("HcsCreateComputeSystem") + procHcsOpenComputeSystem = modvmcompute.NewProc("HcsOpenComputeSystem") + procHcsCloseComputeSystem = modvmcompute.NewProc("HcsCloseComputeSystem") + procHcsStartComputeSystem = modvmcompute.NewProc("HcsStartComputeSystem") + procHcsShutdownComputeSystem = modvmcompute.NewProc("HcsShutdownComputeSystem") + procHcsTerminateComputeSystem = modvmcompute.NewProc("HcsTerminateComputeSystem") + procHcsPauseComputeSystem = modvmcompute.NewProc("HcsPauseComputeSystem") + procHcsResumeComputeSystem = modvmcompute.NewProc("HcsResumeComputeSystem") + procHcsGetComputeSystemProperties = modvmcompute.NewProc("HcsGetComputeSystemProperties") + procHcsModifyComputeSystem = modvmcompute.NewProc("HcsModifyComputeSystem") + procHcsModifyServiceSettings = modvmcompute.NewProc("HcsModifyServiceSettings") + procHcsRegisterComputeSystemCallback = modvmcompute.NewProc("HcsRegisterComputeSystemCallback") + procHcsUnregisterComputeSystemCallback = modvmcompute.NewProc("HcsUnregisterComputeSystemCallback") + procHcsSaveComputeSystem = modvmcompute.NewProc("HcsSaveComputeSystem") + procHcsCreateProcess = modvmcompute.NewProc("HcsCreateProcess") + procHcsOpenProcess = modvmcompute.NewProc("HcsOpenProcess") + procHcsCloseProcess = modvmcompute.NewProc("HcsCloseProcess") + procHcsTerminateProcess = modvmcompute.NewProc("HcsTerminateProcess") + procHcsSignalProcess = modvmcompute.NewProc("HcsSignalProcess") + procHcsGetProcessInfo = modvmcompute.NewProc("HcsGetProcessInfo") + procHcsGetProcessProperties = modvmcompute.NewProc("HcsGetProcessProperties") + procHcsModifyProcess = 
modvmcompute.NewProc("HcsModifyProcess") + procHcsGetServiceProperties = modvmcompute.NewProc("HcsGetServiceProperties") + procHcsRegisterProcessCallback = modvmcompute.NewProc("HcsRegisterProcessCallback") + procHcsUnregisterProcessCallback = modvmcompute.NewProc("HcsUnregisterProcessCallback") +) + +func hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcsEnumerateComputeSystems(_p0, computeSystems, result) +} + +func _hcsEnumerateComputeSystems(query *uint16, computeSystems **uint16, result **uint16) (hr error) { + if hr = procHcsEnumerateComputeSystems.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsEnumerateComputeSystems.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(configuration) + if hr != nil { + return + } + return _hcsCreateComputeSystem(_p0, _p1, identity, computeSystem, result) +} + +func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) { + if hr = procHcsCreateComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsCreateComputeSystem.Addr(), 5, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsOpenComputeSystem(id string, computeSystem *HcsSystem, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _hcsOpenComputeSystem(_p0, computeSystem, result) +} + +func _hcsOpenComputeSystem(id *uint16, computeSystem *HcsSystem, result **uint16) (hr error) { + if hr = procHcsOpenComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsOpenComputeSystem.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsCloseComputeSystem(computeSystem HcsSystem) (hr error) { + if hr = procHcsCloseComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsCloseComputeSystem.Addr(), 1, uintptr(computeSystem), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsStartComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsStartComputeSystem(computeSystem, _p0, result) +} + +func _hcsStartComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsStartComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := 
syscall.Syscall(procHcsStartComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsShutdownComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsShutdownComputeSystem(computeSystem, _p0, result) +} + +func _hcsShutdownComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsShutdownComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsShutdownComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsTerminateComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsTerminateComputeSystem(computeSystem, _p0, result) +} + +func _hcsTerminateComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsTerminateComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsTerminateComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsPauseComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsPauseComputeSystem(computeSystem, _p0, result) +} + +func _hcsPauseComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsPauseComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsPauseComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsResumeComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsResumeComputeSystem(computeSystem, _p0, result) +} + +func _hcsResumeComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsResumeComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsResumeComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(propertyQuery) + if hr != nil { + return + } + return _hcsGetComputeSystemProperties(computeSystem, _p0, properties, result) +} + +func _hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery *uint16, properties **uint16, 
result **uint16) (hr error) { + if hr = procHcsGetComputeSystemProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsGetComputeSystemProperties.Addr(), 4, uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsModifyComputeSystem(computeSystem HcsSystem, configuration string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(configuration) + if hr != nil { + return + } + return _hcsModifyComputeSystem(computeSystem, _p0, result) +} + +func _hcsModifyComputeSystem(computeSystem HcsSystem, configuration *uint16, result **uint16) (hr error) { + if hr = procHcsModifyComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsModifyComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsModifyServiceSettings(settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcsModifyServiceSettings(_p0, result) +} + +func _hcsModifyServiceSettings(settings *uint16, result **uint16) (hr error) { + if hr = procHcsModifyServiceSettings.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsModifyServiceSettings.Addr(), 2, uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) { + if hr = procHcsRegisterComputeSystemCallback.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsRegisterComputeSystemCallback.Addr(), 4, uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) { + if hr = procHcsUnregisterComputeSystemCallback.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsUnregisterComputeSystemCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsSaveComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsSaveComputeSystem(computeSystem, _p0, result) +} + +func _hcsSaveComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsSaveComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsSaveComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsCreateProcess(computeSystem HcsSystem, processParameters string, processInformation *HcsProcessInformation, 
process *HcsProcess, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(processParameters) + if hr != nil { + return + } + return _hcsCreateProcess(computeSystem, _p0, processInformation, process, result) +} + +func _hcsCreateProcess(computeSystem HcsSystem, processParameters *uint16, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) { + if hr = procHcsCreateProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsCreateProcess.Addr(), 5, uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, result **uint16) (hr error) { + if hr = procHcsOpenProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsOpenProcess.Addr(), 4, uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsCloseProcess(process HcsProcess) (hr error) { + if hr = procHcsCloseProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsCloseProcess.Addr(), 1, uintptr(process), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsTerminateProcess(process HcsProcess, result **uint16) (hr error) { + if hr = procHcsTerminateProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 2, uintptr(process), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsSignalProcess(process HcsProcess, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsSignalProcess(process, _p0, result) +} + +func _hcsSignalProcess(process HcsProcess, options *uint16, result **uint16) (hr error) { + if hr = procHcsSignalProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsSignalProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsGetProcessInfo(process HcsProcess, processInformation *HcsProcessInformation, result **uint16) (hr error) { + if hr = procHcsGetProcessInfo.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsGetProcessInfo.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsGetProcessProperties(process HcsProcess, processProperties **uint16, result **uint16) (hr error) { + if hr = procHcsGetProcessProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsGetProcessProperties.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = 
syscall.Errno(r0) + } + return +} + +func hcsModifyProcess(process HcsProcess, settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcsModifyProcess(process, _p0, result) +} + +func _hcsModifyProcess(process HcsProcess, settings *uint16, result **uint16) (hr error) { + if hr = procHcsModifyProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsModifyProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(propertyQuery) + if hr != nil { + return + } + return _hcsGetServiceProperties(_p0, properties, result) +} + +func _hcsGetServiceProperties(propertyQuery *uint16, properties **uint16, result **uint16) (hr error) { + if hr = procHcsGetServiceProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsGetServiceProperties.Addr(), 3, uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsRegisterProcessCallback(process HcsProcess, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) { + if hr = procHcsRegisterProcessCallback.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsRegisterProcessCallback.Addr(), 4, uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsUnregisterProcessCallback(callbackHandle HcsCallback) (hr error) { + if hr = procHcsUnregisterProcessCallback.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsUnregisterProcessCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go new file mode 100644 index 00000000000..def9525417e --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go @@ -0,0 +1,44 @@ +package winapi + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +const PSEUDOCONSOLE_INHERIT_CURSOR = 0x1 + +// CreatePseudoConsole creates a windows pseudo console. +func CreatePseudoConsole(size windows.Coord, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) error { + // We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand. + return createPseudoConsole(*((*uint32)(unsafe.Pointer(&size))), hInput, hOutput, 0, hpcon) +} + +// ResizePseudoConsole resizes the internal buffers of the pseudo console to the width and height specified in `size`. +func ResizePseudoConsole(hpcon windows.Handle, size windows.Coord) error { + // We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand. 
+ return resizePseudoConsole(hpcon, *((*uint32)(unsafe.Pointer(&size)))) +} + +// HRESULT WINAPI CreatePseudoConsole( +// _In_ COORD size, +// _In_ HANDLE hInput, +// _In_ HANDLE hOutput, +// _In_ DWORD dwFlags, +// _Out_ HPCON* phPC +// ); +// +//sys createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) = kernel32.CreatePseudoConsole + +// void WINAPI ClosePseudoConsole( +// _In_ HPCON hPC +// ); +// +//sys ClosePseudoConsole(hpc windows.Handle) = kernel32.ClosePseudoConsole + +// HRESULT WINAPI ResizePseudoConsole( +// _In_ HPCON hPC , +// _In_ COORD size +// ); +// +//sys resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go new file mode 100644 index 00000000000..df28ea24216 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go @@ -0,0 +1,13 @@ +package winapi + +import "github.com/Microsoft/go-winio/pkg/guid" + +//sys CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) = cfgmgr32.CM_Get_Device_ID_List_SizeA +//sys CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error)= cfgmgr32.CM_Get_Device_ID_ListA +//sys CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) = cfgmgr32.CM_Locate_DevNodeW +//sys CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) = cfgmgr32.CM_Get_DevNode_PropertyW + +type DevPropKey struct { + Fmtid guid.GUID + Pid uint32 +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go new file mode 100644 index 00000000000..4e80ef68c92 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go @@ -0,0 +1,15 @@ +package winapi + +import "syscall" + +//sys RtlNtStatusToDosError(status uint32) (winerr error) = ntdll.RtlNtStatusToDosError + +const ( + STATUS_REPARSE_POINT_ENCOUNTERED = 0xC000050B + ERROR_NO_MORE_ITEMS = 0x103 + ERROR_MORE_DATA syscall.Errno = 234 +) + +func NTSuccess(status uint32) bool { + return status == 0 +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go new file mode 100644 index 00000000000..7ce52afd5e1 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go @@ -0,0 +1,110 @@ +package winapi + +//sys NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) = ntdll.NtCreateFile +//sys NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) = ntdll.NtSetInformationFile + +//sys NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) = ntdll.NtOpenDirectoryObject +//sys NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleEntry bool, restartScan bool, context *uint32, returnLength *uint32)(status uint32) = 
ntdll.NtQueryDirectoryObject + +const ( + FileLinkInformationClass = 11 + FileDispositionInformationExClass = 64 + + FILE_READ_ATTRIBUTES = 0x0080 + FILE_WRITE_ATTRIBUTES = 0x0100 + DELETE = 0x10000 + + FILE_OPEN = 1 + FILE_CREATE = 2 + + FILE_LIST_DIRECTORY = 0x00000001 + FILE_DIRECTORY_FILE = 0x00000001 + FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020 + FILE_OPEN_FOR_BACKUP_INTENT = 0x00004000 + FILE_OPEN_REPARSE_POINT = 0x00200000 + + FILE_DISPOSITION_DELETE = 0x00000001 + + OBJ_DONT_REPARSE = 0x1000 + + STATUS_MORE_ENTRIES = 0x105 + STATUS_NO_MORE_ENTRIES = 0x8000001a +) + +// Select entries from FILE_INFO_BY_HANDLE_CLASS. +// +// C declaration: +// typedef enum _FILE_INFO_BY_HANDLE_CLASS { +// FileBasicInfo, +// FileStandardInfo, +// FileNameInfo, +// FileRenameInfo, +// FileDispositionInfo, +// FileAllocationInfo, +// FileEndOfFileInfo, +// FileStreamInfo, +// FileCompressionInfo, +// FileAttributeTagInfo, +// FileIdBothDirectoryInfo, +// FileIdBothDirectoryRestartInfo, +// FileIoPriorityHintInfo, +// FileRemoteProtocolInfo, +// FileFullDirectoryInfo, +// FileFullDirectoryRestartInfo, +// FileStorageInfo, +// FileAlignmentInfo, +// FileIdInfo, +// FileIdExtdDirectoryInfo, +// FileIdExtdDirectoryRestartInfo, +// FileDispositionInfoEx, +// FileRenameInfoEx, +// FileCaseSensitiveInfo, +// FileNormalizedNameInfo, +// MaximumFileInfoByHandleClass +// } FILE_INFO_BY_HANDLE_CLASS, *PFILE_INFO_BY_HANDLE_CLASS; +// +// Documentation: https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ne-minwinbase-file_info_by_handle_class +const ( + FileIdInfo = 18 +) + +type FileDispositionInformationEx struct { + Flags uintptr +} + +type IOStatusBlock struct { + Status, Information uintptr +} + +type ObjectAttributes struct { + Length uintptr + RootDirectory uintptr + ObjectName *UnicodeString + Attributes uintptr + SecurityDescriptor uintptr + SecurityQoS uintptr +} + +type ObjectDirectoryInformation struct { + Name UnicodeString + TypeName UnicodeString +} + +type FileLinkInformation struct { + ReplaceIfExists bool + RootDirectory uintptr + FileNameLength uint32 + FileName [1]uint16 +} + +// C declaration: +// typedef struct _FILE_ID_INFO { +// ULONGLONG VolumeSerialNumber; +// FILE_ID_128 FileId; +// } FILE_ID_INFO, *PFILE_ID_INFO; +// +// Documentation: https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_id_info +type FILE_ID_INFO struct { + VolumeSerialNumber uint64 + FileID [16]byte +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go new file mode 100644 index 00000000000..7eb13f8f0a8 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go @@ -0,0 +1,218 @@ +package winapi + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +// Messages that can be received from an assigned io completion port. 
+// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_associate_completion_port +const ( + JOB_OBJECT_MSG_END_OF_JOB_TIME uint32 = 1 + JOB_OBJECT_MSG_END_OF_PROCESS_TIME uint32 = 2 + JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT uint32 = 3 + JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO uint32 = 4 + JOB_OBJECT_MSG_NEW_PROCESS uint32 = 6 + JOB_OBJECT_MSG_EXIT_PROCESS uint32 = 7 + JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS uint32 = 8 + JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT uint32 = 9 + JOB_OBJECT_MSG_JOB_MEMORY_LIMIT uint32 = 10 + JOB_OBJECT_MSG_NOTIFICATION_LIMIT uint32 = 11 +) + +// Access rights for creating or opening job objects. +// +// https://docs.microsoft.com/en-us/windows/win32/procthread/job-object-security-and-access-rights +const ( + JOB_OBJECT_QUERY = 0x0004 + JOB_OBJECT_ALL_ACCESS = 0x1F001F +) + +// IO limit flags +// +// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/ns-jobapi2-jobobject_io_rate_control_information +const JOB_OBJECT_IO_RATE_CONTROL_ENABLE = 0x1 + +const JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE uint32 = 0x1 + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_cpu_rate_control_information +const ( + JOB_OBJECT_CPU_RATE_CONTROL_ENABLE uint32 = 1 << iota + JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED + JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP + JOB_OBJECT_CPU_RATE_CONTROL_NOTIFY + JOB_OBJECT_CPU_RATE_CONTROL_MIN_MAX_RATE +) + +// JobObjectInformationClass values. Used for a call to QueryInformationJobObject +// +// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/nf-jobapi2-queryinformationjobobject +const ( + JobObjectBasicAccountingInformation uint32 = 1 + JobObjectBasicProcessIdList uint32 = 3 + JobObjectBasicAndIoAccountingInformation uint32 = 8 + JobObjectLimitViolationInformation uint32 = 13 + JobObjectMemoryUsageInformation uint32 = 28 + JobObjectNotificationLimitInformation2 uint32 = 33 + JobObjectIoAttribution uint32 = 42 +) + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_limit_information +type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { + PerProcessUserTimeLimit int64 + PerJobUserTimeLimit int64 + LimitFlags uint32 + MinimumWorkingSetSize uintptr + MaximumWorkingSetSize uintptr + ActiveProcessLimit uint32 + Affinity uintptr + PriorityClass uint32 + SchedulingClass uint32 +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_cpu_rate_control_information +type JOBOBJECT_CPU_RATE_CONTROL_INFORMATION struct { + ControlFlags uint32 + Value uint32 +} + +// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/ns-jobapi2-jobobject_io_rate_control_information +type JOBOBJECT_IO_RATE_CONTROL_INFORMATION struct { + MaxIops int64 + MaxBandwidth int64 + ReservationIops int64 + BaseIOSize uint32 + VolumeName string + ControlFlags uint32 +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_process_id_list +type JOBOBJECT_BASIC_PROCESS_ID_LIST struct { + NumberOfAssignedProcesses uint32 + NumberOfProcessIdsInList uint32 + ProcessIdList [1]uintptr +} + +// AllPids returns all the process Ids in the job object. 
+func (p *JOBOBJECT_BASIC_PROCESS_ID_LIST) AllPids() []uintptr { + return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList:p.NumberOfProcessIdsInList] +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_accounting_information +type JOBOBJECT_BASIC_ACCOUNTING_INFORMATION struct { + TotalUserTime int64 + TotalKernelTime int64 + ThisPeriodTotalUserTime int64 + ThisPeriodTotalKernelTime int64 + TotalPageFaultCount uint32 + TotalProcesses uint32 + ActiveProcesses uint32 + TotalTerminateProcesses uint32 +} + +//https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_and_io_accounting_information +type JOBOBJECT_BASIC_AND_IO_ACCOUNTING_INFORMATION struct { + BasicInfo JOBOBJECT_BASIC_ACCOUNTING_INFORMATION + IoInfo windows.IO_COUNTERS +} + +// typedef struct _JOBOBJECT_MEMORY_USAGE_INFORMATION { +// ULONG64 JobMemory; +// ULONG64 PeakJobMemoryUsed; +// } JOBOBJECT_MEMORY_USAGE_INFORMATION, *PJOBOBJECT_MEMORY_USAGE_INFORMATION; +// +type JOBOBJECT_MEMORY_USAGE_INFORMATION struct { + JobMemory uint64 + PeakJobMemoryUsed uint64 +} + +// typedef struct _JOBOBJECT_IO_ATTRIBUTION_STATS { +// ULONG_PTR IoCount; +// ULONGLONG TotalNonOverlappedQueueTime; +// ULONGLONG TotalNonOverlappedServiceTime; +// ULONGLONG TotalSize; +// } JOBOBJECT_IO_ATTRIBUTION_STATS, *PJOBOBJECT_IO_ATTRIBUTION_STATS; +// +type JOBOBJECT_IO_ATTRIBUTION_STATS struct { + IoCount uintptr + TotalNonOverlappedQueueTime uint64 + TotalNonOverlappedServiceTime uint64 + TotalSize uint64 +} + +// typedef struct _JOBOBJECT_IO_ATTRIBUTION_INFORMATION { +// ULONG ControlFlags; +// JOBOBJECT_IO_ATTRIBUTION_STATS ReadStats; +// JOBOBJECT_IO_ATTRIBUTION_STATS WriteStats; +// } JOBOBJECT_IO_ATTRIBUTION_INFORMATION, *PJOBOBJECT_IO_ATTRIBUTION_INFORMATION; +// +type JOBOBJECT_IO_ATTRIBUTION_INFORMATION struct { + ControlFlags uint32 + ReadStats JOBOBJECT_IO_ATTRIBUTION_STATS + WriteStats JOBOBJECT_IO_ATTRIBUTION_STATS +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_associate_completion_port +type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct { + CompletionKey windows.Handle + CompletionPort windows.Handle +} + +// BOOL IsProcessInJob( +// HANDLE ProcessHandle, +// HANDLE JobHandle, +// PBOOL Result +// ); +// +//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) = kernel32.IsProcessInJob + +// BOOL QueryInformationJobObject( +// HANDLE hJob, +// JOBOBJECTINFOCLASS JobObjectInformationClass, +// LPVOID lpJobObjectInformation, +// DWORD cbJobObjectInformationLength, +// LPDWORD lpReturnLength +// ); +// +//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject + +// HANDLE OpenJobObjectW( +// DWORD dwDesiredAccess, +// BOOL bInheritHandle, +// LPCWSTR lpName +// ); +// +//sys OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) = kernel32.OpenJobObjectW + +// DWORD SetIoRateControlInformationJobObject( +// HANDLE hJob, +// JOBOBJECT_IO_RATE_CONTROL_INFORMATION *IoRateControlInfo +// ); +// +//sys SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) = kernel32.SetIoRateControlInformationJobObject + +// DWORD QueryIoRateControlInformationJobObject( +// HANDLE hJob, +// 
PCWSTR VolumeName, +// JOBOBJECT_IO_RATE_CONTROL_INFORMATION **InfoBlocks, +// ULONG *InfoBlockCount +// ); +//sys QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) = kernel32.QueryIoRateControlInformationJobObject + +// NTSTATUS +// NtOpenJobObject ( +// _Out_ PHANDLE JobHandle, +// _In_ ACCESS_MASK DesiredAccess, +// _In_ POBJECT_ATTRIBUTES ObjectAttributes +// ); +//sys NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) = ntdll.NtOpenJobObject + +// NTSTATUS +// NTAPI +// NtCreateJobObject ( +// _Out_ PHANDLE JobHandle, +// _In_ ACCESS_MASK DesiredAccess, +// _In_opt_ POBJECT_ATTRIBUTES ObjectAttributes +// ); +//sys NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) = ntdll.NtCreateJobObject diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go new file mode 100644 index 00000000000..b6e7cfd4601 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go @@ -0,0 +1,30 @@ +package winapi + +// BOOL LogonUserA( +// LPCWSTR lpszUsername, +// LPCWSTR lpszDomain, +// LPCWSTR lpszPassword, +// DWORD dwLogonType, +// DWORD dwLogonProvider, +// PHANDLE phToken +// ); +// +//sys LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) = advapi32.LogonUserW + +// Logon types +const ( + LOGON32_LOGON_INTERACTIVE uint32 = 2 + LOGON32_LOGON_NETWORK uint32 = 3 + LOGON32_LOGON_BATCH uint32 = 4 + LOGON32_LOGON_SERVICE uint32 = 5 + LOGON32_LOGON_UNLOCK uint32 = 7 + LOGON32_LOGON_NETWORK_CLEARTEXT uint32 = 8 + LOGON32_LOGON_NEW_CREDENTIALS uint32 = 9 +) + +// Logon providers +const ( + LOGON32_PROVIDER_DEFAULT uint32 = 0 + LOGON32_PROVIDER_WINNT40 uint32 = 2 + LOGON32_PROVIDER_WINNT50 uint32 = 3 +) diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go new file mode 100644 index 00000000000..53f62948c90 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go @@ -0,0 +1,4 @@ +package winapi + +//sys LocalAlloc(flags uint32, size int) (ptr uintptr) = kernel32.LocalAlloc +//sys LocalFree(ptr uintptr) = kernel32.LocalFree diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go new file mode 100644 index 00000000000..f37910024f7 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go @@ -0,0 +1,3 @@ +package winapi + +//sys SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) = iphlpapi.SetJobCompartmentId diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go new file mode 100644 index 00000000000..908920e8722 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go @@ -0,0 +1,11 @@ +package winapi + +// DWORD SearchPathW( +// LPCWSTR lpPath, +// LPCWSTR lpFileName, +// LPCWSTR lpExtension, +// DWORD nBufferLength, +// LPWSTR lpBuffer, +// LPWSTR *lpFilePart +// ); +//sys SearchPath(lpPath *uint16, lpFileName 
*uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) = kernel32.SearchPathW diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go new file mode 100644 index 00000000000..222529f433a --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go @@ -0,0 +1,65 @@ +package winapi + +const PROCESS_ALL_ACCESS uint32 = 2097151 + +const ( + PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x20016 + PROC_THREAD_ATTRIBUTE_JOB_LIST = 0x2000D +) + +// ProcessVmCounters corresponds to the _VM_COUNTERS_EX and _VM_COUNTERS_EX2 structures. +const ProcessVmCounters = 3 + +// __kernel_entry NTSTATUS NtQueryInformationProcess( +// [in] HANDLE ProcessHandle, +// [in] PROCESSINFOCLASS ProcessInformationClass, +// [out] PVOID ProcessInformation, +// [in] ULONG ProcessInformationLength, +// [out, optional] PULONG ReturnLength +// ); +// +//sys NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQueryInformationProcess + +// typedef struct _VM_COUNTERS_EX +// { +// SIZE_T PeakVirtualSize; +// SIZE_T VirtualSize; +// ULONG PageFaultCount; +// SIZE_T PeakWorkingSetSize; +// SIZE_T WorkingSetSize; +// SIZE_T QuotaPeakPagedPoolUsage; +// SIZE_T QuotaPagedPoolUsage; +// SIZE_T QuotaPeakNonPagedPoolUsage; +// SIZE_T QuotaNonPagedPoolUsage; +// SIZE_T PagefileUsage; +// SIZE_T PeakPagefileUsage; +// SIZE_T PrivateUsage; +// } VM_COUNTERS_EX, *PVM_COUNTERS_EX; +// +type VM_COUNTERS_EX struct { + PeakVirtualSize uintptr + VirtualSize uintptr + PageFaultCount uint32 + PeakWorkingSetSize uintptr + WorkingSetSize uintptr + QuotaPeakPagedPoolUsage uintptr + QuotaPagedPoolUsage uintptr + QuotaPeakNonPagedPoolUsage uintptr + QuotaNonPagedPoolUsage uintptr + PagefileUsage uintptr + PeakPagefileUsage uintptr + PrivateUsage uintptr +} + +// typedef struct _VM_COUNTERS_EX2 +// { +// VM_COUNTERS_EX CountersEx; +// SIZE_T PrivateWorkingSetSize; +// SIZE_T SharedCommitUsage; +// } VM_COUNTERS_EX2, *PVM_COUNTERS_EX2; +// +type VM_COUNTERS_EX2 struct { + CountersEx VM_COUNTERS_EX + PrivateWorkingSetSize uintptr + SharedCommitUsage uintptr +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go new file mode 100644 index 00000000000..ce79ac2cdb8 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go @@ -0,0 +1,7 @@ +package winapi + +// Get count from all processor groups. 
+// https://docs.microsoft.com/en-us/windows/win32/procthread/processor-groups +const ALL_PROCESSOR_GROUPS = 0xFFFF + +//sys GetActiveProcessorCount(groupNumber uint16) (amount uint32) = kernel32.GetActiveProcessorCount diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go new file mode 100644 index 00000000000..78fe01a4b41 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go @@ -0,0 +1,53 @@ +package winapi + +import "golang.org/x/sys/windows" + +const SystemProcessInformation = 5 + +const STATUS_INFO_LENGTH_MISMATCH = 0xC0000004 + +// __kernel_entry NTSTATUS NtQuerySystemInformation( +// SYSTEM_INFORMATION_CLASS SystemInformationClass, +// PVOID SystemInformation, +// ULONG SystemInformationLength, +// PULONG ReturnLength +// ); +// +//sys NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation + +type SYSTEM_PROCESS_INFORMATION struct { + NextEntryOffset uint32 // ULONG + NumberOfThreads uint32 // ULONG + WorkingSetPrivateSize int64 // LARGE_INTEGER + HardFaultCount uint32 // ULONG + NumberOfThreadsHighWatermark uint32 // ULONG + CycleTime uint64 // ULONGLONG + CreateTime int64 // LARGE_INTEGER + UserTime int64 // LARGE_INTEGER + KernelTime int64 // LARGE_INTEGER + ImageName UnicodeString // UNICODE_STRING + BasePriority int32 // KPRIORITY + UniqueProcessID windows.Handle // HANDLE + InheritedFromUniqueProcessID windows.Handle // HANDLE + HandleCount uint32 // ULONG + SessionID uint32 // ULONG + UniqueProcessKey *uint32 // ULONG_PTR + PeakVirtualSize uintptr // SIZE_T + VirtualSize uintptr // SIZE_T + PageFaultCount uint32 // ULONG + PeakWorkingSetSize uintptr // SIZE_T + WorkingSetSize uintptr // SIZE_T + QuotaPeakPagedPoolUsage uintptr // SIZE_T + QuotaPagedPoolUsage uintptr // SIZE_T + QuotaPeakNonPagedPoolUsage uintptr // SIZE_T + QuotaNonPagedPoolUsage uintptr // SIZE_T + PagefileUsage uintptr // SIZE_T + PeakPagefileUsage uintptr // SIZE_T + PrivatePageCount uintptr // SIZE_T + ReadOperationCount int64 // LARGE_INTEGER + WriteOperationCount int64 // LARGE_INTEGER + OtherOperationCount int64 // LARGE_INTEGER + ReadTransferCount int64 // LARGE_INTEGER + WriteTransferCount int64 // LARGE_INTEGER + OtherTransferCount int64 // LARGE_INTEGER +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go new file mode 100644 index 00000000000..4724713e3e4 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go @@ -0,0 +1,12 @@ +package winapi + +// HANDLE CreateRemoteThread( +// HANDLE hProcess, +// LPSECURITY_ATTRIBUTES lpThreadAttributes, +// SIZE_T dwStackSize, +// LPTHREAD_START_ROUTINE lpStartAddress, +// LPVOID lpParameter, +// DWORD dwCreationFlags, +// LPDWORD lpThreadId +// ); +//sys CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) = kernel32.CreateRemoteThread diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go new file mode 100644 index 00000000000..859b753c246 --- /dev/null +++ 
b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go @@ -0,0 +1,80 @@ +package winapi + +import ( + "errors" + "reflect" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +// Uint16BufferToSlice wraps a uint16 pointer-and-length into a slice +// for easier interop with Go APIs +func Uint16BufferToSlice(buffer *uint16, bufferLength int) (result []uint16) { + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&result)) + hdr.Data = uintptr(unsafe.Pointer(buffer)) + hdr.Cap = bufferLength + hdr.Len = bufferLength + + return +} + +// UnicodeString corresponds to UNICODE_STRING win32 struct defined here +// https://docs.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_unicode_string +type UnicodeString struct { + Length uint16 + MaximumLength uint16 + Buffer *uint16 +} + +// NTSTRSAFE_UNICODE_STRING_MAX_CCH is a constant defined in ntstrsafe.h. This value +// denotes the maximum number of wide chars a path can have. +const NTSTRSAFE_UNICODE_STRING_MAX_CCH = 32767 + +//String converts a UnicodeString to a golang string +func (uni UnicodeString) String() string { + // UnicodeString is not guaranteed to be null terminated, therefore + // use the UnicodeString's Length field + return windows.UTF16ToString(Uint16BufferToSlice(uni.Buffer, int(uni.Length/2))) +} + +// NewUnicodeString allocates a new UnicodeString and copies `s` into +// the buffer of the new UnicodeString. +func NewUnicodeString(s string) (*UnicodeString, error) { + buf, err := windows.UTF16FromString(s) + if err != nil { + return nil, err + } + + if len(buf) > NTSTRSAFE_UNICODE_STRING_MAX_CCH { + return nil, syscall.ENAMETOOLONG + } + + uni := &UnicodeString{ + // The length is in bytes and should not include the trailing null character. + Length: uint16((len(buf) - 1) * 2), + MaximumLength: uint16((len(buf) - 1) * 2), + Buffer: &buf[0], + } + return uni, nil +} + +// ConvertStringSetToSlice is a helper function used to convert the contents of +// `buf` into a string slice. `buf` contains a set of null terminated strings +// with an additional null at the end to indicate the end of the set. +func ConvertStringSetToSlice(buf []byte) ([]string, error) { + var results []string + prev := 0 + for i := range buf { + if buf[i] == 0 { + if prev == i { + // found two null characters in a row, return result + return results, nil + } + results = append(results, string(buf[prev:i])) + prev = i + 1 + } + } + return nil, errors.New("string set malformed: missing null terminator at end of buffer") +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go new file mode 100644 index 00000000000..d2cc9d9fba6 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go @@ -0,0 +1,5 @@ +// Package winapi contains various low-level bindings to Windows APIs. It can +// be thought of as an extension to golang.org/x/sys/windows. 
+package winapi + +//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go user.go console.go system.go net.go path.go thread.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go new file mode 100644 index 00000000000..1f16cf0b8e1 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go @@ -0,0 +1,354 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package winapi + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modntdll = windows.NewLazySystemDLL("ntdll.dll") + modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll") + + procCreatePseudoConsole = modkernel32.NewProc("CreatePseudoConsole") + procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") + procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") + procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation") + procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId") + procSearchPathW = modkernel32.NewProc("SearchPathW") + procCreateRemoteThread = modkernel32.NewProc("CreateRemoteThread") + procIsProcessInJob = modkernel32.NewProc("IsProcessInJob") + procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") + procOpenJobObjectW = modkernel32.NewProc("OpenJobObjectW") + procSetIoRateControlInformationJobObject = modkernel32.NewProc("SetIoRateControlInformationJobObject") + procQueryIoRateControlInformationJobObject = modkernel32.NewProc("QueryIoRateControlInformationJobObject") + procNtOpenJobObject = modntdll.NewProc("NtOpenJobObject") + procNtCreateJobObject = modntdll.NewProc("NtCreateJobObject") + procLogonUserW = modadvapi32.NewProc("LogonUserW") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procLocalFree = modkernel32.NewProc("LocalFree") + procNtQueryInformationProcess = modntdll.NewProc("NtQueryInformationProcess") + procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") + procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA") + procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA") + procCM_Locate_DevNodeW = modcfgmgr32.NewProc("CM_Locate_DevNodeW") + procCM_Get_DevNode_PropertyW = modcfgmgr32.NewProc("CM_Get_DevNode_PropertyW") + procNtCreateFile = modntdll.NewProc("NtCreateFile") + procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile") + procNtOpenDirectoryObject = modntdll.NewProc("NtOpenDirectoryObject") + procNtQueryDirectoryObject = modntdll.NewProc("NtQueryDirectoryObject") + 
procRtlNtStatusToDosError = modntdll.NewProc("RtlNtStatusToDosError") +) + +func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) { + r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func ClosePseudoConsole(hpc windows.Handle) { + syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(hpc), 0, 0) + return +} + +func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) { + r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(hPc), uintptr(size), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) { + r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) + status = uint32(r0) + return +} + +func SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) { + r0, _, _ := syscall.Syscall(procSetJobCompartmentId.Addr(), 2, uintptr(handle), uintptr(compartmentId), 0) + if r0 != 0 { + win32Err = syscall.Errno(r0) + } + return +} + +func SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) { + r0, _, e1 := syscall.Syscall6(procSearchPathW.Addr(), 6, uintptr(unsafe.Pointer(lpPath)), uintptr(unsafe.Pointer(lpFileName)), uintptr(unsafe.Pointer(lpExtension)), uintptr(nBufferLength), uintptr(unsafe.Pointer(lpBuffer)), uintptr(unsafe.Pointer(lpFilePath))) + size = uint32(r0) + if size == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateRemoteThread.Addr(), 7, uintptr(process), uintptr(unsafe.Pointer(sa)), uintptr(stackSize), uintptr(startAddr), uintptr(parameter), uintptr(creationFlags), uintptr(unsafe.Pointer(threadID)), 0, 0) + handle = windows.Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) { + r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + 
return +} + +func OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procOpenJobObjectW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(lpName))) + handle = windows.Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall(procSetIoRateControlInformationJobObject.Addr(), 2, uintptr(jobHandle), uintptr(unsafe.Pointer(ioRateControlInfo)), 0) + ret = uint32(r0) + if ret == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall6(procQueryIoRateControlInformationJobObject.Addr(), 4, uintptr(jobHandle), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(ioRateControlInfo)), uintptr(unsafe.Pointer(infoBlockCount)), 0, 0) + ret = uint32(r0) + if ret == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { + r0, _, _ := syscall.Syscall(procNtOpenJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) + status = uint32(r0) + return +} + +func NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { + r0, _, _ := syscall.Syscall(procNtCreateJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) + status = uint32(r0) + return +} + +func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) { + r1, _, e1 := syscall.Syscall6(procLogonUserW.Addr(), 6, uintptr(unsafe.Pointer(username)), uintptr(unsafe.Pointer(domain)), uintptr(unsafe.Pointer(password)), uintptr(logonType), uintptr(logonProvider), uintptr(unsafe.Pointer(token))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func LocalAlloc(flags uint32, size int) (ptr uintptr) { + r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0) + ptr = uintptr(r0) + return +} + +func LocalFree(ptr uintptr) { + syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0) + return +} + +func NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) { + r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength)), 0) + status = uint32(r0) + return +} + +func GetActiveProcessorCount(groupNumber uint16) (amount uint32) { + r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + amount = uint32(r0) + return +} + +func CMGetDeviceIDListSize(pulLen 
*uint32, pszFilter *byte, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall(procCM_Get_Device_ID_List_SizeA.Addr(), 3, uintptr(unsafe.Pointer(pulLen)), uintptr(unsafe.Pointer(pszFilter)), uintptr(uFlags)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall6(procCM_Get_Device_ID_ListA.Addr(), 4, uintptr(unsafe.Pointer(pszFilter)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(uFlags), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(pDeviceID) + if hr != nil { + return + } + return _CMLocateDevNode(pdnDevInst, _p0, uFlags) +} + +func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall(procCM_Locate_DevNodeW.Addr(), 3, uintptr(unsafe.Pointer(pdnDevInst)), uintptr(unsafe.Pointer(pDeviceID)), uintptr(uFlags)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_PropertyW.Addr(), 6, uintptr(dnDevInst), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(unsafe.Pointer(propertyBufferSize)), uintptr(uFlags)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) { + r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0) + status = uint32(r0) + return +} + +func NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) { + r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0) + status = uint32(r0) + return +} + +func NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) { + r0, _, _ := syscall.Syscall(procNtOpenDirectoryObject.Addr(), 3, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa))) + status = uint32(r0) + return +} + +func NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleEntry bool, restartScan bool, context *uint32, returnLength *uint32) (status uint32) { + var _p0 uint32 + if singleEntry { + _p0 = 1 + } else { + _p0 = 0 + } + var _p1 uint32 + if restartScan { + _p1 = 1 + } else { + _p1 = 0 + } + r0, _, _ := 
syscall.Syscall9(procNtQueryDirectoryObject.Addr(), 7, uintptr(handle), uintptr(unsafe.Pointer(buffer)), uintptr(length), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(returnLength)), 0, 0) + status = uint32(r0) + return +} + +func RtlNtStatusToDosError(status uint32) (winerr error) { + r0, _, _ := syscall.Syscall(procRtlNtStatusToDosError.Addr(), 1, uintptr(status), 0, 0) + if r0 != 0 { + winerr = syscall.Errno(r0) + } + return +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go new file mode 100644 index 00000000000..3ab3bcd89a1 --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go @@ -0,0 +1,50 @@ +package osversion + +import ( + "fmt" + "sync" + + "golang.org/x/sys/windows" +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +var ( + osv OSVersion + once sync.Once +) + +// Get gets the operating system version on Windows. +// The calling application must be manifested to get the correct version information. +func Get() OSVersion { + once.Do(func() { + var err error + osv = OSVersion{} + osv.Version, err = windows.GetVersion() + if err != nil { + // GetVersion never fails. + panic(err) + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + }) + return osv +} + +// Build gets the build-number on Windows +// The calling application must be manifested to get the correct version information. +func Build() uint16 { + return Get().Build +} + +func (osv OSVersion) ToString() string { + return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) +} diff --git a/ecs-agent/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/ecs-agent/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go new file mode 100644 index 00000000000..75dce5d821d --- /dev/null +++ b/ecs-agent/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go @@ -0,0 +1,50 @@ +package osversion + +const ( + // RS1 (version 1607, codename "Redstone 1") corresponds to Windows Server + // 2016 (ltsc2016) and Windows 10 (Anniversary Update). + RS1 = 14393 + + // RS2 (version 1703, codename "Redstone 2") was a client-only update, and + // corresponds to Windows 10 (Creators Update). + RS2 = 15063 + + // RS3 (version 1709, codename "Redstone 3") corresponds to Windows Server + // 1709 (Semi-Annual Channel (SAC)), and Windows 10 (Fall Creators Update). + RS3 = 16299 + + // RS4 (version 1803, codename "Redstone 4") corresponds to Windows Server + // 1803 (Semi-Annual Channel (SAC)), and Windows 10 (April 2018 Update). + RS4 = 17134 + + // RS5 (version 1809, codename "Redstone 5") corresponds to Windows Server + // 2019 (ltsc2019), and Windows 10 (October 2018 Update). + RS5 = 17763 + + // V19H1 (version 1903) corresponds to Windows Server 1903 (semi-annual + // channel). + V19H1 = 18362 + + // V19H2 (version 1909) corresponds to Windows Server 1909 (semi-annual + // channel). + V19H2 = 18363 + + // V20H1 (version 2004) corresponds to Windows Server 2004 (semi-annual + // channel). + V20H1 = 19041 + + // V20H2 corresponds to Windows Server 20H2 (semi-annual channel). 
+ V20H2 = 19042 + + // V21H1 corresponds to Windows Server 21H1 (semi-annual channel). + V21H1 = 19043 + + // V21H2Win10 corresponds to Windows 10 (November 2021 Update). + V21H2Win10 = 19044 + + // V21H2Server corresponds to Windows Server 2022 (ltsc2022). + V21H2Server = 20348 + + // V21H2Win11 corresponds to Windows 11 (original release). + V21H2Win11 = 22000 +) diff --git a/ecs-agent/vendor/github.com/containerd/cgroups/LICENSE b/ecs-agent/vendor/github.com/containerd/cgroups/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/ecs-agent/vendor/github.com/containerd/cgroups/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/ecs-agent/vendor/github.com/containerd/cgroups/stats/v1/doc.go b/ecs-agent/vendor/github.com/containerd/cgroups/stats/v1/doc.go new file mode 100644 index 00000000000..23f3cdd4b37 --- /dev/null +++ b/ecs-agent/vendor/github.com/containerd/cgroups/stats/v1/doc.go @@ -0,0 +1,17 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package v1 diff --git a/ecs-agent/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go b/ecs-agent/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go new file mode 100644 index 00000000000..6d2d41770b9 --- /dev/null +++ b/ecs-agent/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go @@ -0,0 +1,6125 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/containerd/cgroups/stats/v1/metrics.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Metrics struct { + Hugetlb []*HugetlbStat `protobuf:"bytes,1,rep,name=hugetlb,proto3" json:"hugetlb,omitempty"` + Pids *PidsStat `protobuf:"bytes,2,opt,name=pids,proto3" json:"pids,omitempty"` + CPU *CPUStat `protobuf:"bytes,3,opt,name=cpu,proto3" json:"cpu,omitempty"` + Memory *MemoryStat `protobuf:"bytes,4,opt,name=memory,proto3" json:"memory,omitempty"` + Blkio *BlkIOStat `protobuf:"bytes,5,opt,name=blkio,proto3" json:"blkio,omitempty"` + Rdma *RdmaStat `protobuf:"bytes,6,opt,name=rdma,proto3" json:"rdma,omitempty"` + Network []*NetworkStat `protobuf:"bytes,7,rep,name=network,proto3" json:"network,omitempty"` + CgroupStats *CgroupStats `protobuf:"bytes,8,opt,name=cgroup_stats,json=cgroupStats,proto3" json:"cgroup_stats,omitempty"` + MemoryOomControl *MemoryOomControl `protobuf:"bytes,9,opt,name=memory_oom_control,json=memoryOomControl,proto3" json:"memory_oom_control,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metrics) Reset() { *m = Metrics{} } +func (*Metrics) ProtoMessage() {} +func (*Metrics) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{0} +} +func (m *Metrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Metrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Metrics.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Metrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metrics.Merge(m, src) +} +func (m *Metrics) XXX_Size() int { + return m.Size() +} +func (m *Metrics) XXX_DiscardUnknown() { + xxx_messageInfo_Metrics.DiscardUnknown(m) +} + +var xxx_messageInfo_Metrics proto.InternalMessageInfo + +type HugetlbStat struct { + Usage uint64 `protobuf:"varint,1,opt,name=usage,proto3" json:"usage,omitempty"` + Max uint64 `protobuf:"varint,2,opt,name=max,proto3" json:"max,omitempty"` + Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt,proto3" json:"failcnt,omitempty"` + Pagesize string `protobuf:"bytes,4,opt,name=pagesize,proto3" json:"pagesize,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HugetlbStat) Reset() { *m = HugetlbStat{} } +func (*HugetlbStat) ProtoMessage() {} +func (*HugetlbStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{1} +} +func (m *HugetlbStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HugetlbStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HugetlbStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HugetlbStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_HugetlbStat.Merge(m, src) +} +func (m *HugetlbStat) XXX_Size() int { + return m.Size() +} +func (m *HugetlbStat) XXX_DiscardUnknown() { + xxx_messageInfo_HugetlbStat.DiscardUnknown(m) +} + +var xxx_messageInfo_HugetlbStat proto.InternalMessageInfo + +type PidsStat struct { + Current uint64 `protobuf:"varint,1,opt,name=current,proto3" json:"current,omitempty"` + Limit uint64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` + 
XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PidsStat) Reset() { *m = PidsStat{} } +func (*PidsStat) ProtoMessage() {} +func (*PidsStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{2} +} +func (m *PidsStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PidsStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PidsStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PidsStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_PidsStat.Merge(m, src) +} +func (m *PidsStat) XXX_Size() int { + return m.Size() +} +func (m *PidsStat) XXX_DiscardUnknown() { + xxx_messageInfo_PidsStat.DiscardUnknown(m) +} + +var xxx_messageInfo_PidsStat proto.InternalMessageInfo + +type CPUStat struct { + Usage *CPUUsage `protobuf:"bytes,1,opt,name=usage,proto3" json:"usage,omitempty"` + Throttling *Throttle `protobuf:"bytes,2,opt,name=throttling,proto3" json:"throttling,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CPUStat) Reset() { *m = CPUStat{} } +func (*CPUStat) ProtoMessage() {} +func (*CPUStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{3} +} +func (m *CPUStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CPUStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CPUStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CPUStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_CPUStat.Merge(m, src) +} +func (m *CPUStat) XXX_Size() int { + return m.Size() +} +func (m *CPUStat) XXX_DiscardUnknown() { + xxx_messageInfo_CPUStat.DiscardUnknown(m) +} + +var xxx_messageInfo_CPUStat proto.InternalMessageInfo + +type CPUUsage struct { + // values in nanoseconds + Total uint64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` + Kernel uint64 `protobuf:"varint,2,opt,name=kernel,proto3" json:"kernel,omitempty"` + User uint64 `protobuf:"varint,3,opt,name=user,proto3" json:"user,omitempty"` + PerCPU []uint64 `protobuf:"varint,4,rep,packed,name=per_cpu,json=perCpu,proto3" json:"per_cpu,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CPUUsage) Reset() { *m = CPUUsage{} } +func (*CPUUsage) ProtoMessage() {} +func (*CPUUsage) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{4} +} +func (m *CPUUsage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CPUUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CPUUsage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CPUUsage) XXX_Merge(src proto.Message) { + xxx_messageInfo_CPUUsage.Merge(m, src) +} +func (m *CPUUsage) XXX_Size() int { + return m.Size() +} +func (m *CPUUsage) XXX_DiscardUnknown() { + xxx_messageInfo_CPUUsage.DiscardUnknown(m) +} + +var xxx_messageInfo_CPUUsage proto.InternalMessageInfo + 
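For context on how these generated types are consumed, here is a minimal sketch that decodes a serialized Metrics message with the generated Unmarshal method and reads the CPU and pids fields declared above. The import path github.com/containerd/cgroups/stats/v1 and the origin of the raw bytes are illustrative assumptions, not something established by this patch.

package main

import (
	"fmt"
	"log"

	v1 "github.com/containerd/cgroups/stats/v1" // assumed vendored import path
)

// printStats decodes a cgroup v1 Metrics message using the generated
// Unmarshal method and prints a few of the fields defined above.
func printStats(raw []byte) error {
	var m v1.Metrics
	if err := m.Unmarshal(raw); err != nil {
		return fmt.Errorf("decode cgroup metrics: %w", err)
	}
	if m.CPU != nil && m.CPU.Usage != nil {
		// CPUUsage values are reported in nanoseconds, per the struct comment.
		fmt.Printf("cpu: total=%dns kernel=%dns user=%dns\n",
			m.CPU.Usage.Total, m.CPU.Usage.Kernel, m.CPU.Usage.User)
	}
	if m.Pids != nil {
		fmt.Printf("pids: current=%d limit=%d\n", m.Pids.Current, m.Pids.Limit)
	}
	return nil
}

func main() {
	// raw would normally come from a containerd task metrics response; an
	// empty slice simply decodes to a zero-valued Metrics message.
	if err := printStats([]byte{}); err != nil {
		log.Fatal(err)
	}
}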
+type Throttle struct { + Periods uint64 `protobuf:"varint,1,opt,name=periods,proto3" json:"periods,omitempty"` + ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods,json=throttledPeriods,proto3" json:"throttled_periods,omitempty"` + ThrottledTime uint64 `protobuf:"varint,3,opt,name=throttled_time,json=throttledTime,proto3" json:"throttled_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Throttle) Reset() { *m = Throttle{} } +func (*Throttle) ProtoMessage() {} +func (*Throttle) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{5} +} +func (m *Throttle) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Throttle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Throttle.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Throttle) XXX_Merge(src proto.Message) { + xxx_messageInfo_Throttle.Merge(m, src) +} +func (m *Throttle) XXX_Size() int { + return m.Size() +} +func (m *Throttle) XXX_DiscardUnknown() { + xxx_messageInfo_Throttle.DiscardUnknown(m) +} + +var xxx_messageInfo_Throttle proto.InternalMessageInfo + +type MemoryStat struct { + Cache uint64 `protobuf:"varint,1,opt,name=cache,proto3" json:"cache,omitempty"` + RSS uint64 `protobuf:"varint,2,opt,name=rss,proto3" json:"rss,omitempty"` + RSSHuge uint64 `protobuf:"varint,3,opt,name=rss_huge,json=rssHuge,proto3" json:"rss_huge,omitempty"` + MappedFile uint64 `protobuf:"varint,4,opt,name=mapped_file,json=mappedFile,proto3" json:"mapped_file,omitempty"` + Dirty uint64 `protobuf:"varint,5,opt,name=dirty,proto3" json:"dirty,omitempty"` + Writeback uint64 `protobuf:"varint,6,opt,name=writeback,proto3" json:"writeback,omitempty"` + PgPgIn uint64 `protobuf:"varint,7,opt,name=pg_pg_in,json=pgPgIn,proto3" json:"pg_pg_in,omitempty"` + PgPgOut uint64 `protobuf:"varint,8,opt,name=pg_pg_out,json=pgPgOut,proto3" json:"pg_pg_out,omitempty"` + PgFault uint64 `protobuf:"varint,9,opt,name=pg_fault,json=pgFault,proto3" json:"pg_fault,omitempty"` + PgMajFault uint64 `protobuf:"varint,10,opt,name=pg_maj_fault,json=pgMajFault,proto3" json:"pg_maj_fault,omitempty"` + InactiveAnon uint64 `protobuf:"varint,11,opt,name=inactive_anon,json=inactiveAnon,proto3" json:"inactive_anon,omitempty"` + ActiveAnon uint64 `protobuf:"varint,12,opt,name=active_anon,json=activeAnon,proto3" json:"active_anon,omitempty"` + InactiveFile uint64 `protobuf:"varint,13,opt,name=inactive_file,json=inactiveFile,proto3" json:"inactive_file,omitempty"` + ActiveFile uint64 `protobuf:"varint,14,opt,name=active_file,json=activeFile,proto3" json:"active_file,omitempty"` + Unevictable uint64 `protobuf:"varint,15,opt,name=unevictable,proto3" json:"unevictable,omitempty"` + HierarchicalMemoryLimit uint64 `protobuf:"varint,16,opt,name=hierarchical_memory_limit,json=hierarchicalMemoryLimit,proto3" json:"hierarchical_memory_limit,omitempty"` + HierarchicalSwapLimit uint64 `protobuf:"varint,17,opt,name=hierarchical_swap_limit,json=hierarchicalSwapLimit,proto3" json:"hierarchical_swap_limit,omitempty"` + TotalCache uint64 `protobuf:"varint,18,opt,name=total_cache,json=totalCache,proto3" json:"total_cache,omitempty"` + TotalRSS uint64 `protobuf:"varint,19,opt,name=total_rss,json=totalRss,proto3" json:"total_rss,omitempty"` + TotalRSSHuge uint64 
`protobuf:"varint,20,opt,name=total_rss_huge,json=totalRssHuge,proto3" json:"total_rss_huge,omitempty"` + TotalMappedFile uint64 `protobuf:"varint,21,opt,name=total_mapped_file,json=totalMappedFile,proto3" json:"total_mapped_file,omitempty"` + TotalDirty uint64 `protobuf:"varint,22,opt,name=total_dirty,json=totalDirty,proto3" json:"total_dirty,omitempty"` + TotalWriteback uint64 `protobuf:"varint,23,opt,name=total_writeback,json=totalWriteback,proto3" json:"total_writeback,omitempty"` + TotalPgPgIn uint64 `protobuf:"varint,24,opt,name=total_pg_pg_in,json=totalPgPgIn,proto3" json:"total_pg_pg_in,omitempty"` + TotalPgPgOut uint64 `protobuf:"varint,25,opt,name=total_pg_pg_out,json=totalPgPgOut,proto3" json:"total_pg_pg_out,omitempty"` + TotalPgFault uint64 `protobuf:"varint,26,opt,name=total_pg_fault,json=totalPgFault,proto3" json:"total_pg_fault,omitempty"` + TotalPgMajFault uint64 `protobuf:"varint,27,opt,name=total_pg_maj_fault,json=totalPgMajFault,proto3" json:"total_pg_maj_fault,omitempty"` + TotalInactiveAnon uint64 `protobuf:"varint,28,opt,name=total_inactive_anon,json=totalInactiveAnon,proto3" json:"total_inactive_anon,omitempty"` + TotalActiveAnon uint64 `protobuf:"varint,29,opt,name=total_active_anon,json=totalActiveAnon,proto3" json:"total_active_anon,omitempty"` + TotalInactiveFile uint64 `protobuf:"varint,30,opt,name=total_inactive_file,json=totalInactiveFile,proto3" json:"total_inactive_file,omitempty"` + TotalActiveFile uint64 `protobuf:"varint,31,opt,name=total_active_file,json=totalActiveFile,proto3" json:"total_active_file,omitempty"` + TotalUnevictable uint64 `protobuf:"varint,32,opt,name=total_unevictable,json=totalUnevictable,proto3" json:"total_unevictable,omitempty"` + Usage *MemoryEntry `protobuf:"bytes,33,opt,name=usage,proto3" json:"usage,omitempty"` + Swap *MemoryEntry `protobuf:"bytes,34,opt,name=swap,proto3" json:"swap,omitempty"` + Kernel *MemoryEntry `protobuf:"bytes,35,opt,name=kernel,proto3" json:"kernel,omitempty"` + KernelTCP *MemoryEntry `protobuf:"bytes,36,opt,name=kernel_tcp,json=kernelTcp,proto3" json:"kernel_tcp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemoryStat) Reset() { *m = MemoryStat{} } +func (*MemoryStat) ProtoMessage() {} +func (*MemoryStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{6} +} +func (m *MemoryStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MemoryStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MemoryStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MemoryStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemoryStat.Merge(m, src) +} +func (m *MemoryStat) XXX_Size() int { + return m.Size() +} +func (m *MemoryStat) XXX_DiscardUnknown() { + xxx_messageInfo_MemoryStat.DiscardUnknown(m) +} + +var xxx_messageInfo_MemoryStat proto.InternalMessageInfo + +type MemoryEntry struct { + Limit uint64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"` + Usage uint64 `protobuf:"varint,2,opt,name=usage,proto3" json:"usage,omitempty"` + Max uint64 `protobuf:"varint,3,opt,name=max,proto3" json:"max,omitempty"` + Failcnt uint64 `protobuf:"varint,4,opt,name=failcnt,proto3" json:"failcnt,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + 
XXX_sizecache int32 `json:"-"` +} + +func (m *MemoryEntry) Reset() { *m = MemoryEntry{} } +func (*MemoryEntry) ProtoMessage() {} +func (*MemoryEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{7} +} +func (m *MemoryEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MemoryEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MemoryEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MemoryEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemoryEntry.Merge(m, src) +} +func (m *MemoryEntry) XXX_Size() int { + return m.Size() +} +func (m *MemoryEntry) XXX_DiscardUnknown() { + xxx_messageInfo_MemoryEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_MemoryEntry proto.InternalMessageInfo + +type MemoryOomControl struct { + OomKillDisable uint64 `protobuf:"varint,1,opt,name=oom_kill_disable,json=oomKillDisable,proto3" json:"oom_kill_disable,omitempty"` + UnderOom uint64 `protobuf:"varint,2,opt,name=under_oom,json=underOom,proto3" json:"under_oom,omitempty"` + OomKill uint64 `protobuf:"varint,3,opt,name=oom_kill,json=oomKill,proto3" json:"oom_kill,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemoryOomControl) Reset() { *m = MemoryOomControl{} } +func (*MemoryOomControl) ProtoMessage() {} +func (*MemoryOomControl) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{8} +} +func (m *MemoryOomControl) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MemoryOomControl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MemoryOomControl.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MemoryOomControl) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemoryOomControl.Merge(m, src) +} +func (m *MemoryOomControl) XXX_Size() int { + return m.Size() +} +func (m *MemoryOomControl) XXX_DiscardUnknown() { + xxx_messageInfo_MemoryOomControl.DiscardUnknown(m) +} + +var xxx_messageInfo_MemoryOomControl proto.InternalMessageInfo + +type BlkIOStat struct { + IoServiceBytesRecursive []*BlkIOEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive,json=ioServiceBytesRecursive,proto3" json:"io_service_bytes_recursive,omitempty"` + IoServicedRecursive []*BlkIOEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive,json=ioServicedRecursive,proto3" json:"io_serviced_recursive,omitempty"` + IoQueuedRecursive []*BlkIOEntry `protobuf:"bytes,3,rep,name=io_queued_recursive,json=ioQueuedRecursive,proto3" json:"io_queued_recursive,omitempty"` + IoServiceTimeRecursive []*BlkIOEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive,json=ioServiceTimeRecursive,proto3" json:"io_service_time_recursive,omitempty"` + IoWaitTimeRecursive []*BlkIOEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive,json=ioWaitTimeRecursive,proto3" json:"io_wait_time_recursive,omitempty"` + IoMergedRecursive []*BlkIOEntry `protobuf:"bytes,6,rep,name=io_merged_recursive,json=ioMergedRecursive,proto3" json:"io_merged_recursive,omitempty"` + IoTimeRecursive []*BlkIOEntry `protobuf:"bytes,7,rep,name=io_time_recursive,json=ioTimeRecursive,proto3" json:"io_time_recursive,omitempty"` + 
SectorsRecursive []*BlkIOEntry `protobuf:"bytes,8,rep,name=sectors_recursive,json=sectorsRecursive,proto3" json:"sectors_recursive,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlkIOStat) Reset() { *m = BlkIOStat{} } +func (*BlkIOStat) ProtoMessage() {} +func (*BlkIOStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{9} +} +func (m *BlkIOStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlkIOStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlkIOStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlkIOStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlkIOStat.Merge(m, src) +} +func (m *BlkIOStat) XXX_Size() int { + return m.Size() +} +func (m *BlkIOStat) XXX_DiscardUnknown() { + xxx_messageInfo_BlkIOStat.DiscardUnknown(m) +} + +var xxx_messageInfo_BlkIOStat proto.InternalMessageInfo + +type BlkIOEntry struct { + Op string `protobuf:"bytes,1,opt,name=op,proto3" json:"op,omitempty"` + Device string `protobuf:"bytes,2,opt,name=device,proto3" json:"device,omitempty"` + Major uint64 `protobuf:"varint,3,opt,name=major,proto3" json:"major,omitempty"` + Minor uint64 `protobuf:"varint,4,opt,name=minor,proto3" json:"minor,omitempty"` + Value uint64 `protobuf:"varint,5,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlkIOEntry) Reset() { *m = BlkIOEntry{} } +func (*BlkIOEntry) ProtoMessage() {} +func (*BlkIOEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{10} +} +func (m *BlkIOEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlkIOEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlkIOEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlkIOEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlkIOEntry.Merge(m, src) +} +func (m *BlkIOEntry) XXX_Size() int { + return m.Size() +} +func (m *BlkIOEntry) XXX_DiscardUnknown() { + xxx_messageInfo_BlkIOEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_BlkIOEntry proto.InternalMessageInfo + +type RdmaStat struct { + Current []*RdmaEntry `protobuf:"bytes,1,rep,name=current,proto3" json:"current,omitempty"` + Limit []*RdmaEntry `protobuf:"bytes,2,rep,name=limit,proto3" json:"limit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RdmaStat) Reset() { *m = RdmaStat{} } +func (*RdmaStat) ProtoMessage() {} +func (*RdmaStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{11} +} +func (m *RdmaStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RdmaStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RdmaStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RdmaStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_RdmaStat.Merge(m, 
src) +} +func (m *RdmaStat) XXX_Size() int { + return m.Size() +} +func (m *RdmaStat) XXX_DiscardUnknown() { + xxx_messageInfo_RdmaStat.DiscardUnknown(m) +} + +var xxx_messageInfo_RdmaStat proto.InternalMessageInfo + +type RdmaEntry struct { + Device string `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"` + HcaHandles uint32 `protobuf:"varint,2,opt,name=hca_handles,json=hcaHandles,proto3" json:"hca_handles,omitempty"` + HcaObjects uint32 `protobuf:"varint,3,opt,name=hca_objects,json=hcaObjects,proto3" json:"hca_objects,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RdmaEntry) Reset() { *m = RdmaEntry{} } +func (*RdmaEntry) ProtoMessage() {} +func (*RdmaEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{12} +} +func (m *RdmaEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RdmaEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RdmaEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RdmaEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_RdmaEntry.Merge(m, src) +} +func (m *RdmaEntry) XXX_Size() int { + return m.Size() +} +func (m *RdmaEntry) XXX_DiscardUnknown() { + xxx_messageInfo_RdmaEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_RdmaEntry proto.InternalMessageInfo + +type NetworkStat struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + RxBytes uint64 `protobuf:"varint,2,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"` + RxPackets uint64 `protobuf:"varint,3,opt,name=rx_packets,json=rxPackets,proto3" json:"rx_packets,omitempty"` + RxErrors uint64 `protobuf:"varint,4,opt,name=rx_errors,json=rxErrors,proto3" json:"rx_errors,omitempty"` + RxDropped uint64 `protobuf:"varint,5,opt,name=rx_dropped,json=rxDropped,proto3" json:"rx_dropped,omitempty"` + TxBytes uint64 `protobuf:"varint,6,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"` + TxPackets uint64 `protobuf:"varint,7,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"` + TxErrors uint64 `protobuf:"varint,8,opt,name=tx_errors,json=txErrors,proto3" json:"tx_errors,omitempty"` + TxDropped uint64 `protobuf:"varint,9,opt,name=tx_dropped,json=txDropped,proto3" json:"tx_dropped,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NetworkStat) Reset() { *m = NetworkStat{} } +func (*NetworkStat) ProtoMessage() {} +func (*NetworkStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{13} +} +func (m *NetworkStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NetworkStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NetworkStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkStat.Merge(m, src) +} +func (m *NetworkStat) XXX_Size() int { + return m.Size() +} +func (m *NetworkStat) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkStat.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkStat proto.InternalMessageInfo + +// 
CgroupStats exports per-cgroup statistics. +type CgroupStats struct { + // number of tasks sleeping + NrSleeping uint64 `protobuf:"varint,1,opt,name=nr_sleeping,json=nrSleeping,proto3" json:"nr_sleeping,omitempty"` + // number of tasks running + NrRunning uint64 `protobuf:"varint,2,opt,name=nr_running,json=nrRunning,proto3" json:"nr_running,omitempty"` + // number of tasks in stopped state + NrStopped uint64 `protobuf:"varint,3,opt,name=nr_stopped,json=nrStopped,proto3" json:"nr_stopped,omitempty"` + // number of tasks in uninterruptible state + NrUninterruptible uint64 `protobuf:"varint,4,opt,name=nr_uninterruptible,json=nrUninterruptible,proto3" json:"nr_uninterruptible,omitempty"` + // number of tasks waiting on IO + NrIoWait uint64 `protobuf:"varint,5,opt,name=nr_io_wait,json=nrIoWait,proto3" json:"nr_io_wait,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CgroupStats) Reset() { *m = CgroupStats{} } +func (*CgroupStats) ProtoMessage() {} +func (*CgroupStats) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{14} +} +func (m *CgroupStats) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CgroupStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CgroupStats.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CgroupStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_CgroupStats.Merge(m, src) +} +func (m *CgroupStats) XXX_Size() int { + return m.Size() +} +func (m *CgroupStats) XXX_DiscardUnknown() { + xxx_messageInfo_CgroupStats.DiscardUnknown(m) +} + +var xxx_messageInfo_CgroupStats proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Metrics)(nil), "io.containerd.cgroups.v1.Metrics") + proto.RegisterType((*HugetlbStat)(nil), "io.containerd.cgroups.v1.HugetlbStat") + proto.RegisterType((*PidsStat)(nil), "io.containerd.cgroups.v1.PidsStat") + proto.RegisterType((*CPUStat)(nil), "io.containerd.cgroups.v1.CPUStat") + proto.RegisterType((*CPUUsage)(nil), "io.containerd.cgroups.v1.CPUUsage") + proto.RegisterType((*Throttle)(nil), "io.containerd.cgroups.v1.Throttle") + proto.RegisterType((*MemoryStat)(nil), "io.containerd.cgroups.v1.MemoryStat") + proto.RegisterType((*MemoryEntry)(nil), "io.containerd.cgroups.v1.MemoryEntry") + proto.RegisterType((*MemoryOomControl)(nil), "io.containerd.cgroups.v1.MemoryOomControl") + proto.RegisterType((*BlkIOStat)(nil), "io.containerd.cgroups.v1.BlkIOStat") + proto.RegisterType((*BlkIOEntry)(nil), "io.containerd.cgroups.v1.BlkIOEntry") + proto.RegisterType((*RdmaStat)(nil), "io.containerd.cgroups.v1.RdmaStat") + proto.RegisterType((*RdmaEntry)(nil), "io.containerd.cgroups.v1.RdmaEntry") + proto.RegisterType((*NetworkStat)(nil), "io.containerd.cgroups.v1.NetworkStat") + proto.RegisterType((*CgroupStats)(nil), "io.containerd.cgroups.v1.CgroupStats") +} + +func init() { + proto.RegisterFile("github.com/containerd/cgroups/stats/v1/metrics.proto", fileDescriptor_a17b2d87c332bfaa) +} + +var fileDescriptor_a17b2d87c332bfaa = []byte{ + // 1749 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x58, 0xcd, 0x72, 0xe3, 0xc6, + 0x11, 0x36, 0x45, 0x48, 0x24, 0x9a, 0x92, 0x56, 0x9a, 0xfd, 0x83, 0xe4, 0xb5, 0x28, 0x53, 0xbb, + 0x89, 0xe2, 0xad, 0x48, 0x65, 0x27, 0xb5, 0x95, 0x75, 0xec, 
0x4a, 0x59, 0x5a, 0xbb, 0x76, 0xcb, + 0x51, 0x44, 0x83, 0x52, 0xd9, 0x39, 0xa1, 0x40, 0x70, 0x16, 0x9c, 0x15, 0x80, 0x81, 0x07, 0x03, + 0x89, 0xca, 0x29, 0x87, 0x54, 0xe5, 0x94, 0x07, 0xca, 0x1b, 0xf8, 0x98, 0x4b, 0x52, 0xc9, 0x45, + 0x15, 0xf3, 0x49, 0x52, 0x33, 0x3d, 0xf8, 0xa1, 0xbc, 0x5a, 0x85, 0x37, 0x76, 0xcf, 0xd7, 0x5f, + 0xf7, 0x34, 0xbe, 0x19, 0x34, 0x08, 0xbf, 0x0e, 0x99, 0x1c, 0xe7, 0xc3, 0xbd, 0x80, 0xc7, 0xfb, + 0x01, 0x4f, 0xa4, 0xcf, 0x12, 0x2a, 0x46, 0xfb, 0x41, 0x28, 0x78, 0x9e, 0x66, 0xfb, 0x99, 0xf4, + 0x65, 0xb6, 0x7f, 0xfe, 0xf1, 0x7e, 0x4c, 0xa5, 0x60, 0x41, 0xb6, 0x97, 0x0a, 0x2e, 0x39, 0x71, + 0x18, 0xdf, 0xab, 0xd0, 0x7b, 0x06, 0xbd, 0x77, 0xfe, 0xf1, 0xe6, 0xbd, 0x90, 0x87, 0x5c, 0x83, + 0xf6, 0xd5, 0x2f, 0xc4, 0xf7, 0xfe, 0x65, 0x41, 0xeb, 0x08, 0x19, 0xc8, 0xef, 0xa0, 0x35, 0xce, + 0x43, 0x2a, 0xa3, 0xa1, 0xd3, 0xd8, 0x6e, 0xee, 0x76, 0x3e, 0x79, 0xb2, 0x77, 0x13, 0xdb, 0xde, + 0x4b, 0x04, 0x0e, 0xa4, 0x2f, 0xdd, 0x22, 0x8a, 0x3c, 0x03, 0x2b, 0x65, 0xa3, 0xcc, 0x59, 0xd8, + 0x6e, 0xec, 0x76, 0x3e, 0xe9, 0xdd, 0x1c, 0xdd, 0x67, 0xa3, 0x4c, 0x87, 0x6a, 0x3c, 0xf9, 0x0c, + 0x9a, 0x41, 0x9a, 0x3b, 0x4d, 0x1d, 0xf6, 0xe1, 0xcd, 0x61, 0x87, 0xfd, 0x53, 0x15, 0x75, 0xd0, + 0x9a, 0x5e, 0x75, 0x9b, 0x87, 0xfd, 0x53, 0x57, 0x85, 0x91, 0xcf, 0x60, 0x29, 0xa6, 0x31, 0x17, + 0x97, 0x8e, 0xa5, 0x09, 0x1e, 0xdf, 0x4c, 0x70, 0xa4, 0x71, 0x3a, 0xb3, 0x89, 0x21, 0xcf, 0x61, + 0x71, 0x18, 0x9d, 0x31, 0xee, 0x2c, 0xea, 0xe0, 0x9d, 0x9b, 0x83, 0x0f, 0xa2, 0xb3, 0x57, 0xc7, + 0x3a, 0x16, 0x23, 0xd4, 0x76, 0xc5, 0x28, 0xf6, 0x9d, 0xa5, 0xdb, 0xb6, 0xeb, 0x8e, 0x62, 0x1f, + 0xb7, 0xab, 0xf0, 0xaa, 0xcf, 0x09, 0x95, 0x17, 0x5c, 0x9c, 0x39, 0xad, 0xdb, 0xfa, 0xfc, 0x07, + 0x04, 0x62, 0x9f, 0x4d, 0x14, 0x79, 0x09, 0xcb, 0x08, 0xf1, 0xb4, 0x0a, 0x9c, 0xb6, 0x2e, 0xe0, + 0x1d, 0x2c, 0x87, 0xfa, 0xa7, 0x22, 0xc9, 0xdc, 0x4e, 0x50, 0x19, 0xe4, 0x3b, 0x20, 0xd8, 0x07, + 0x8f, 0xf3, 0xd8, 0x53, 0xc1, 0x82, 0x47, 0x8e, 0xad, 0xf9, 0x3e, 0xba, 0xad, 0x8f, 0xc7, 0x3c, + 0x3e, 0xc4, 0x08, 0x77, 0x2d, 0xbe, 0xe6, 0xe9, 0x9d, 0x41, 0xa7, 0xa6, 0x11, 0x72, 0x0f, 0x16, + 0xf3, 0xcc, 0x0f, 0xa9, 0xd3, 0xd8, 0x6e, 0xec, 0x5a, 0x2e, 0x1a, 0x64, 0x0d, 0x9a, 0xb1, 0x3f, + 0xd1, 0x7a, 0xb1, 0x5c, 0xf5, 0x93, 0x38, 0xd0, 0x7a, 0xed, 0xb3, 0x28, 0x48, 0xa4, 0x96, 0x83, + 0xe5, 0x16, 0x26, 0xd9, 0x84, 0x76, 0xea, 0x87, 0x34, 0x63, 0x7f, 0xa2, 0xfa, 0x41, 0xdb, 0x6e, + 0x69, 0xf7, 0x3e, 0x85, 0x76, 0x21, 0x29, 0xc5, 0x10, 0xe4, 0x42, 0xd0, 0x44, 0x9a, 0x5c, 0x85, + 0xa9, 0x6a, 0x88, 0x58, 0xcc, 0xa4, 0xc9, 0x87, 0x46, 0xef, 0xaf, 0x0d, 0x68, 0x19, 0x61, 0x91, + 0xdf, 0xd4, 0xab, 0x7c, 0xe7, 0x23, 0x3d, 0xec, 0x9f, 0x9e, 0x2a, 0x64, 0xb1, 0x93, 0x03, 0x00, + 0x39, 0x16, 0x5c, 0xca, 0x88, 0x25, 0xe1, 0xed, 0x07, 0xe0, 0x04, 0xb1, 0xd4, 0xad, 0x45, 0xf5, + 0xbe, 0x87, 0x76, 0x41, 0xab, 0x6a, 0x95, 0x5c, 0xfa, 0x51, 0xd1, 0x2f, 0x6d, 0x90, 0x07, 0xb0, + 0x74, 0x46, 0x45, 0x42, 0x23, 0xb3, 0x05, 0x63, 0x11, 0x02, 0x56, 0x9e, 0x51, 0x61, 0x5a, 0xa6, + 0x7f, 0x93, 0x1d, 0x68, 0xa5, 0x54, 0x78, 0xea, 0x60, 0x59, 0xdb, 0xcd, 0x5d, 0xeb, 0x00, 0xa6, + 0x57, 0xdd, 0xa5, 0x3e, 0x15, 0xea, 0xe0, 0x2c, 0xa5, 0x54, 0x1c, 0xa6, 0x79, 0x6f, 0x02, 0xed, + 0xa2, 0x14, 0xd5, 0xb8, 0x94, 0x0a, 0xc6, 0x47, 0x59, 0xd1, 0x38, 0x63, 0x92, 0xa7, 0xb0, 0x6e, + 0xca, 0xa4, 0x23, 0xaf, 0xc0, 0x60, 0x05, 0x6b, 0xe5, 0x42, 0xdf, 0x80, 0x9f, 0xc0, 0x6a, 0x05, + 0x96, 0x2c, 0xa6, 0xa6, 0xaa, 0x95, 0xd2, 0x7b, 0xc2, 0x62, 0xda, 0xfb, 0x4f, 0x07, 0xa0, 0x3a, + 0x8e, 0x6a, 0xbf, 0x81, 0x1f, 0x8c, 0x4b, 0x7d, 0x68, 0x83, 0x6c, 0x40, 0x53, 0x64, 
0x26, 0x15, + 0x9e, 0x7a, 0x77, 0x30, 0x70, 0x95, 0x8f, 0xfc, 0x0c, 0xda, 0x22, 0xcb, 0x3c, 0x75, 0xf5, 0x60, + 0x82, 0x83, 0xce, 0xf4, 0xaa, 0xdb, 0x72, 0x07, 0x03, 0x25, 0x3b, 0xb7, 0x25, 0xb2, 0x4c, 0xfd, + 0x20, 0x5d, 0xe8, 0xc4, 0x7e, 0x9a, 0xd2, 0x91, 0xf7, 0x9a, 0x45, 0xa8, 0x1c, 0xcb, 0x05, 0x74, + 0x7d, 0xc5, 0x22, 0xdd, 0xe9, 0x11, 0x13, 0xf2, 0x52, 0x5f, 0x00, 0x96, 0x8b, 0x06, 0x79, 0x04, + 0xf6, 0x85, 0x60, 0x92, 0x0e, 0xfd, 0xe0, 0x4c, 0x1f, 0x70, 0xcb, 0xad, 0x1c, 0xc4, 0x81, 0x76, + 0x1a, 0x7a, 0x69, 0xe8, 0xb1, 0xc4, 0x69, 0xe1, 0x93, 0x48, 0xc3, 0x7e, 0xf8, 0x2a, 0x21, 0x9b, + 0x60, 0xe3, 0x0a, 0xcf, 0xa5, 0x3e, 0x97, 0xaa, 0x8d, 0x61, 0x3f, 0x3c, 0xce, 0x25, 0xd9, 0xd0, + 0x51, 0xaf, 0xfd, 0x3c, 0x92, 0xfa, 0x88, 0xe9, 0xa5, 0xaf, 0x94, 0x49, 0xb6, 0x61, 0x39, 0x0d, + 0xbd, 0xd8, 0x7f, 0x63, 0x96, 0x01, 0xcb, 0x4c, 0xc3, 0x23, 0xff, 0x0d, 0x22, 0x76, 0x60, 0x85, + 0x25, 0x7e, 0x20, 0xd9, 0x39, 0xf5, 0xfc, 0x84, 0x27, 0x4e, 0x47, 0x43, 0x96, 0x0b, 0xe7, 0x17, + 0x09, 0x4f, 0xd4, 0x66, 0xeb, 0x90, 0x65, 0x64, 0xa9, 0x01, 0xea, 0x2c, 0xba, 0x1f, 0x2b, 0xb3, + 0x2c, 0xba, 0x23, 0x15, 0x8b, 0x86, 0xac, 0xd6, 0x59, 0x34, 0x60, 0x1b, 0x3a, 0x79, 0x42, 0xcf, + 0x59, 0x20, 0xfd, 0x61, 0x44, 0x9d, 0x3b, 0x1a, 0x50, 0x77, 0x91, 0x4f, 0x61, 0x63, 0xcc, 0xa8, + 0xf0, 0x45, 0x30, 0x66, 0x81, 0x1f, 0x79, 0xe6, 0x92, 0xc1, 0xe3, 0xb7, 0xa6, 0xf1, 0x0f, 0xeb, + 0x00, 0x54, 0xc2, 0xef, 0xd5, 0x32, 0x79, 0x06, 0x33, 0x4b, 0x5e, 0x76, 0xe1, 0xa7, 0x26, 0x72, + 0x5d, 0x47, 0xde, 0xaf, 0x2f, 0x0f, 0x2e, 0xfc, 0x14, 0xe3, 0xba, 0xd0, 0xd1, 0xa7, 0xc4, 0x43, + 0x21, 0x11, 0x2c, 0x5b, 0xbb, 0x0e, 0xb5, 0x9a, 0x7e, 0x01, 0x36, 0x02, 0x94, 0xa6, 0xee, 0x6a, + 0xcd, 0x2c, 0x4f, 0xaf, 0xba, 0xed, 0x13, 0xe5, 0x54, 0xc2, 0x6a, 0xeb, 0x65, 0x37, 0xcb, 0xc8, + 0x33, 0x58, 0x2d, 0xa1, 0xa8, 0xb1, 0x7b, 0x1a, 0xbf, 0x36, 0xbd, 0xea, 0x2e, 0x17, 0x78, 0x2d, + 0xb4, 0xe5, 0x22, 0x46, 0xab, 0xed, 0x23, 0x58, 0xc7, 0xb8, 0xba, 0xe6, 0xee, 0xeb, 0x4a, 0xee, + 0xe8, 0x85, 0xa3, 0x4a, 0x78, 0x65, 0xbd, 0x28, 0xbf, 0x07, 0xb5, 0x7a, 0x5f, 0x68, 0x0d, 0xfe, + 0x1c, 0x30, 0xc6, 0xab, 0x94, 0xf8, 0x50, 0x83, 0xb0, 0xb6, 0x6f, 0x4b, 0x39, 0xee, 0x14, 0xd5, + 0x96, 0xa2, 0x74, 0xf0, 0x91, 0x68, 0x6f, 0x1f, 0x95, 0xf9, 0xa4, 0x60, 0xab, 0xf4, 0xb9, 0x81, + 0x0f, 0xbf, 0x44, 0x29, 0x91, 0x3e, 0xae, 0x71, 0xa1, 0x16, 0x37, 0x67, 0x50, 0xa8, 0xc6, 0xa7, + 0x40, 0x4a, 0x54, 0xa5, 0xda, 0xf7, 0x6b, 0x1b, 0xed, 0x57, 0xd2, 0xdd, 0x83, 0xbb, 0x08, 0x9e, + 0x15, 0xf0, 0x23, 0x8d, 0xc6, 0x7e, 0xbd, 0xaa, 0xab, 0xb8, 0x6c, 0x62, 0x1d, 0xfd, 0x41, 0x8d, + 0xfb, 0x8b, 0x0a, 0xfb, 0x53, 0x6e, 0xdd, 0xf2, 0xad, 0xb7, 0x70, 0xeb, 0xa6, 0x5f, 0xe7, 0xd6, + 0xe8, 0xee, 0x4f, 0xb8, 0x35, 0xf6, 0x69, 0x81, 0xad, 0x8b, 0x7d, 0xdb, 0x5c, 0x7b, 0x6a, 0xe1, + 0xb4, 0xa6, 0xf8, 0xdf, 0x16, 0xaf, 0x8e, 0x0f, 0x6f, 0x7b, 0x19, 0xa3, 0xd6, 0xbf, 0x4c, 0xa4, + 0xb8, 0x2c, 0xde, 0x1e, 0xcf, 0xc1, 0x52, 0x2a, 0x77, 0x7a, 0xf3, 0xc4, 0xea, 0x10, 0xf2, 0x79, + 0xf9, 0x4a, 0xd8, 0x99, 0x27, 0xb8, 0x78, 0x73, 0x0c, 0x00, 0xf0, 0x97, 0x27, 0x83, 0xd4, 0x79, + 0x3c, 0x07, 0xc5, 0xc1, 0xca, 0xf4, 0xaa, 0x6b, 0x7f, 0xad, 0x83, 0x4f, 0x0e, 0xfb, 0xae, 0x8d, + 0x3c, 0x27, 0x41, 0xda, 0xa3, 0xd0, 0xa9, 0x01, 0xab, 0xf7, 0x6e, 0xa3, 0xf6, 0xde, 0xad, 0x26, + 0x82, 0x85, 0xb7, 0x4c, 0x04, 0xcd, 0xb7, 0x4e, 0x04, 0xd6, 0xcc, 0x44, 0xd0, 0x93, 0xb0, 0x76, + 0x7d, 0x10, 0x21, 0xbb, 0xb0, 0xa6, 0x26, 0x99, 0x33, 0x16, 0xa9, 0x73, 0x95, 0xe9, 0x47, 0x86, + 0x69, 0x57, 0x39, 0x8f, 0xbf, 0x66, 0x51, 0xf4, 0x02, 0xbd, 0xe4, 0x7d, 0xb0, 0xf3, 0x64, 0x44, + 0x85, 0x9a, 
0x7c, 0x4c, 0x0d, 0x6d, 0xed, 0x38, 0xe6, 0xb1, 0xba, 0xaa, 0x0b, 0x9a, 0x62, 0x0e, + 0x31, 0xe1, 0xbd, 0x7f, 0x2e, 0x82, 0x5d, 0x8e, 0x82, 0xc4, 0x87, 0x4d, 0xc6, 0xbd, 0x8c, 0x8a, + 0x73, 0x16, 0x50, 0x6f, 0x78, 0x29, 0x69, 0xe6, 0x09, 0x1a, 0xe4, 0x22, 0x63, 0xe7, 0xd4, 0x8c, + 0xd1, 0x8f, 0x6f, 0x99, 0x29, 0xf1, 0x89, 0x3c, 0x64, 0x7c, 0x80, 0x34, 0x07, 0x8a, 0xc5, 0x2d, + 0x48, 0xc8, 0x77, 0x70, 0xbf, 0x4a, 0x31, 0xaa, 0xb1, 0x2f, 0xcc, 0xc1, 0x7e, 0xb7, 0x64, 0x1f, + 0x55, 0xcc, 0x27, 0x70, 0x97, 0x71, 0xef, 0xfb, 0x9c, 0xe6, 0x33, 0xbc, 0xcd, 0x39, 0x78, 0xd7, + 0x19, 0xff, 0x46, 0xc7, 0x57, 0xac, 0x1e, 0x6c, 0xd4, 0x5a, 0xa2, 0x26, 0x80, 0x1a, 0xb7, 0x35, + 0x07, 0xf7, 0x83, 0xb2, 0x66, 0x35, 0x31, 0x54, 0x09, 0xfe, 0x08, 0x0f, 0x18, 0xf7, 0x2e, 0x7c, + 0x26, 0xaf, 0xb3, 0x2f, 0xce, 0xd7, 0x91, 0x6f, 0x7d, 0x26, 0x67, 0xa9, 0xb1, 0x23, 0x31, 0x15, + 0xe1, 0x4c, 0x47, 0x96, 0xe6, 0xeb, 0xc8, 0x91, 0x8e, 0xaf, 0x58, 0xfb, 0xb0, 0xce, 0xf8, 0xf5, + 0x5a, 0x5b, 0x73, 0x70, 0xde, 0x61, 0x7c, 0xb6, 0xce, 0x6f, 0x60, 0x3d, 0xa3, 0x81, 0xe4, 0xa2, + 0xae, 0xb6, 0xf6, 0x1c, 0x8c, 0x6b, 0x26, 0xbc, 0xa4, 0xec, 0x9d, 0x03, 0x54, 0xeb, 0x64, 0x15, + 0x16, 0x78, 0xaa, 0x4f, 0x8e, 0xed, 0x2e, 0xf0, 0x54, 0x4d, 0x9e, 0x23, 0x75, 0xd9, 0xe1, 0x71, + 0xb5, 0x5d, 0x63, 0xa9, 0x53, 0x1c, 0xfb, 0x6f, 0x78, 0x31, 0x7a, 0xa2, 0xa1, 0xbd, 0x2c, 0xe1, + 0xc2, 0x9c, 0x58, 0x34, 0x94, 0xf7, 0xdc, 0x8f, 0x72, 0x5a, 0x4c, 0x5a, 0xda, 0xe8, 0xfd, 0xa5, + 0x01, 0xed, 0xe2, 0x03, 0x89, 0x7c, 0x5e, 0x1f, 0xde, 0x9b, 0xef, 0xfe, 0x1e, 0x53, 0x41, 0xb8, + 0x99, 0x72, 0xc2, 0x7f, 0x5e, 0x4d, 0xf8, 0xff, 0x77, 0xb0, 0xf9, 0x0c, 0xa0, 0x60, 0x97, 0xbe, + 0xda, 0x6e, 0x1b, 0x33, 0xbb, 0xed, 0x42, 0x67, 0x1c, 0xf8, 0xde, 0xd8, 0x4f, 0x46, 0x11, 0xc5, + 0xb9, 0x74, 0xc5, 0x85, 0x71, 0xe0, 0xbf, 0x44, 0x4f, 0x01, 0xe0, 0xc3, 0x37, 0x34, 0x90, 0x99, + 0x6e, 0x0a, 0x02, 0x8e, 0xd1, 0xd3, 0xfb, 0xdb, 0x02, 0x74, 0x6a, 0xdf, 0x74, 0x6a, 0x72, 0x4f, + 0xfc, 0xb8, 0xc8, 0xa3, 0x7f, 0xab, 0xcb, 0x47, 0x4c, 0xf0, 0x2e, 0x31, 0x17, 0x53, 0x4b, 0x4c, + 0xf4, 0xa5, 0x40, 0x3e, 0x00, 0x10, 0x13, 0x2f, 0xf5, 0x83, 0x33, 0x6a, 0xe8, 0x2d, 0xd7, 0x16, + 0x93, 0x3e, 0x3a, 0xd4, 0x9d, 0x26, 0x26, 0x1e, 0x15, 0x82, 0x8b, 0xcc, 0xf4, 0xbe, 0x2d, 0x26, + 0x5f, 0x6a, 0xdb, 0xc4, 0x8e, 0x04, 0x57, 0x13, 0x88, 0x79, 0x06, 0xb6, 0x98, 0xbc, 0x40, 0x87, + 0xca, 0x2a, 0x8b, 0xac, 0x38, 0xf0, 0xb6, 0x64, 0x95, 0x55, 0x56, 0x59, 0x71, 0xe0, 0xb5, 0x65, + 0x3d, 0xab, 0x2c, 0xb3, 0xe2, 0xcc, 0xdb, 0x96, 0xb5, 0xac, 0xb2, 0xca, 0x6a, 0x17, 0xb1, 0x26, + 0x6b, 0xef, 0xef, 0x0d, 0xe8, 0xd4, 0xbe, 0x4e, 0x55, 0x03, 0x13, 0xe1, 0x65, 0x11, 0xa5, 0xa9, + 0xfa, 0x90, 0xc2, 0xab, 0x1b, 0x12, 0x31, 0x30, 0x1e, 0xc5, 0x97, 0x08, 0x4f, 0xe4, 0x49, 0x52, + 0x7c, 0x68, 0x59, 0xae, 0x9d, 0x08, 0x17, 0x1d, 0x66, 0x39, 0x93, 0x98, 0xae, 0x59, 0x2c, 0x0f, + 0xd0, 0x41, 0x7e, 0x09, 0x24, 0x11, 0x5e, 0x9e, 0xb0, 0x44, 0x52, 0x21, 0xf2, 0x54, 0xb2, 0x61, + 0xf9, 0x51, 0xb0, 0x9e, 0x88, 0xd3, 0xd9, 0x05, 0xf2, 0x48, 0xb3, 0x99, 0xcb, 0xc6, 0xb4, 0xac, + 0x9d, 0x88, 0x57, 0xfa, 0xe6, 0x38, 0x70, 0x7e, 0xf8, 0x71, 0xeb, 0xbd, 0x7f, 0xff, 0xb8, 0xf5, + 0xde, 0x9f, 0xa7, 0x5b, 0x8d, 0x1f, 0xa6, 0x5b, 0x8d, 0x7f, 0x4c, 0xb7, 0x1a, 0xff, 0x9d, 0x6e, + 0x35, 0x86, 0x4b, 0xfa, 0xcf, 0x95, 0x5f, 0xfd, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xc4, 0x4e, 0x24, + 0x22, 0xc4, 0x11, 0x00, 0x00, +} + +func (m *Metrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *Metrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.MemoryOomControl != nil { + { + size, err := m.MemoryOomControl.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.CgroupStats != nil { + { + size, err := m.CgroupStats.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if len(m.Network) > 0 { + for iNdEx := len(m.Network) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Network[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.Rdma != nil { + { + size, err := m.Rdma.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.Blkio != nil { + { + size, err := m.Blkio.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Memory != nil { + { + size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.CPU != nil { + { + size, err := m.CPU.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Pids != nil { + { + size, err := m.Pids.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Hugetlb) > 0 { + for iNdEx := len(m.Hugetlb) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Hugetlb[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *HugetlbStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HugetlbStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HugetlbStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Pagesize) > 0 { + i -= len(m.Pagesize) + copy(dAtA[i:], m.Pagesize) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Pagesize))) + i-- + dAtA[i] = 0x22 + } + if m.Failcnt != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt)) + i-- + dAtA[i] = 0x18 + } + if m.Max != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Max)) + i-- + dAtA[i] = 0x10 + } + if m.Usage != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Usage)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *PidsStat) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PidsStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PidsStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Limit != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x10 + } + if m.Current != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Current)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CPUStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CPUStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CPUStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Throttling != nil { + { + size, err := m.Throttling.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Usage != nil { + { + size, err := m.Usage.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CPUUsage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CPUUsage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CPUUsage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.PerCPU) > 0 { + dAtA11 := make([]byte, len(m.PerCPU)*10) + var j10 int + for _, num := range m.PerCPU { + for num >= 1<<7 { + dAtA11[j10] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j10++ + } + dAtA11[j10] = uint8(num) + j10++ + } + i -= j10 + copy(dAtA[i:], dAtA11[:j10]) + i = encodeVarintMetrics(dAtA, i, uint64(j10)) + i-- + dAtA[i] = 0x22 + } + if m.User != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.User)) + i-- + dAtA[i] = 0x18 + } + if m.Kernel != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Kernel)) + i-- + dAtA[i] = 0x10 + } + if m.Total != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Total)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Throttle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Throttle) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Throttle) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil 
{ + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ThrottledTime != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledTime)) + i-- + dAtA[i] = 0x18 + } + if m.ThrottledPeriods != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledPeriods)) + i-- + dAtA[i] = 0x10 + } + if m.Periods != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Periods)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MemoryStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemoryStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.KernelTCP != nil { + { + size, err := m.KernelTCP.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + if m.Kernel != nil { + { + size, err := m.Kernel.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + if m.Swap != nil { + { + size, err := m.Swap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + if m.Usage != nil { + { + size, err := m.Usage.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + } + if m.TotalUnevictable != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalUnevictable)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x80 + } + if m.TotalActiveFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveFile)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf8 + } + if m.TotalInactiveFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveFile)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf0 + } + if m.TotalActiveAnon != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveAnon)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe8 + } + if m.TotalInactiveAnon != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveAnon)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe0 + } + if m.TotalPgMajFault != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgMajFault)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd8 + } + if m.TotalPgFault != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgFault)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd0 + } + if m.TotalPgPgOut != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgOut)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc8 + } + if m.TotalPgPgIn != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgIn)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc0 + } + if m.TotalWriteback != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalWriteback)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb8 + } + if m.TotalDirty != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalDirty)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb0 + } + if m.TotalMappedFile != 0 { + i = encodeVarintMetrics(dAtA, i, 
uint64(m.TotalMappedFile)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if m.TotalRSSHuge != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSSHuge)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if m.TotalRSS != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSS)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if m.TotalCache != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalCache)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + } + if m.HierarchicalSwapLimit != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalSwapLimit)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if m.HierarchicalMemoryLimit != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalMemoryLimit)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.Unevictable != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Unevictable)) + i-- + dAtA[i] = 0x78 + } + if m.ActiveFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveFile)) + i-- + dAtA[i] = 0x70 + } + if m.InactiveFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveFile)) + i-- + dAtA[i] = 0x68 + } + if m.ActiveAnon != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveAnon)) + i-- + dAtA[i] = 0x60 + } + if m.InactiveAnon != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveAnon)) + i-- + dAtA[i] = 0x58 + } + if m.PgMajFault != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.PgMajFault)) + i-- + dAtA[i] = 0x50 + } + if m.PgFault != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.PgFault)) + i-- + dAtA[i] = 0x48 + } + if m.PgPgOut != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgOut)) + i-- + dAtA[i] = 0x40 + } + if m.PgPgIn != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgIn)) + i-- + dAtA[i] = 0x38 + } + if m.Writeback != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Writeback)) + i-- + dAtA[i] = 0x30 + } + if m.Dirty != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Dirty)) + i-- + dAtA[i] = 0x28 + } + if m.MappedFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.MappedFile)) + i-- + dAtA[i] = 0x20 + } + if m.RSSHuge != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RSSHuge)) + i-- + dAtA[i] = 0x18 + } + if m.RSS != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RSS)) + i-- + dAtA[i] = 0x10 + } + if m.Cache != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Cache)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MemoryEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemoryEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemoryEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Failcnt != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt)) + i-- + dAtA[i] = 0x20 + } + if m.Max != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Max)) + i-- + dAtA[i] = 0x18 + } + if m.Usage != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Usage)) + i-- + dAtA[i] = 0x10 + } + if m.Limit != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MemoryOomControl) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemoryOomControl) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemoryOomControl) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.OomKill != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.OomKill)) + i-- + dAtA[i] = 0x18 + } + if m.UnderOom != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.UnderOom)) + i-- + dAtA[i] = 0x10 + } + if m.OomKillDisable != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.OomKillDisable)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BlkIOStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlkIOStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlkIOStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.SectorsRecursive) > 0 { + for iNdEx := len(m.SectorsRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SectorsRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if len(m.IoTimeRecursive) > 0 { + for iNdEx := len(m.IoTimeRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if len(m.IoMergedRecursive) > 0 { + for iNdEx := len(m.IoMergedRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoMergedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.IoWaitTimeRecursive) > 0 { + for iNdEx := len(m.IoWaitTimeRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoWaitTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.IoServiceTimeRecursive) > 0 { + for iNdEx := len(m.IoServiceTimeRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoServiceTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.IoQueuedRecursive) > 0 { + for iNdEx := len(m.IoQueuedRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoQueuedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.IoServicedRecursive) > 0 { + for iNdEx := len(m.IoServicedRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoServicedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.IoServiceBytesRecursive) > 0 { + for iNdEx := len(m.IoServiceBytesRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoServiceBytesRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *BlkIOEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlkIOEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlkIOEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x28 + } + if m.Minor != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Minor)) + i-- + dAtA[i] = 0x20 + } + if m.Major != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Major)) + i-- + dAtA[i] = 0x18 + } + if len(m.Device) > 0 { + i -= len(m.Device) + copy(dAtA[i:], m.Device) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device))) + i-- + dAtA[i] = 0x12 + } + if len(m.Op) > 0 { + i -= len(m.Op) + copy(dAtA[i:], m.Op) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Op))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RdmaStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RdmaStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RdmaStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Limit) > 0 { + for iNdEx := len(m.Limit) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Limit[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Current) > 0 { + for iNdEx := len(m.Current) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Current[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RdmaEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RdmaEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RdmaEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.HcaObjects != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.HcaObjects)) + i-- + dAtA[i] = 0x18 + } + if m.HcaHandles != 0 { + i = encodeVarintMetrics(dAtA, i, 
uint64(m.HcaHandles)) + i-- + dAtA[i] = 0x10 + } + if len(m.Device) > 0 { + i -= len(m.Device) + copy(dAtA[i:], m.Device) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NetworkStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TxDropped != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TxDropped)) + i-- + dAtA[i] = 0x48 + } + if m.TxErrors != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TxErrors)) + i-- + dAtA[i] = 0x40 + } + if m.TxPackets != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TxPackets)) + i-- + dAtA[i] = 0x38 + } + if m.TxBytes != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TxBytes)) + i-- + dAtA[i] = 0x30 + } + if m.RxDropped != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RxDropped)) + i-- + dAtA[i] = 0x28 + } + if m.RxErrors != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RxErrors)) + i-- + dAtA[i] = 0x20 + } + if m.RxPackets != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RxPackets)) + i-- + dAtA[i] = 0x18 + } + if m.RxBytes != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RxBytes)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CgroupStats) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CgroupStats) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CgroupStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.NrIoWait != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.NrIoWait)) + i-- + dAtA[i] = 0x28 + } + if m.NrUninterruptible != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.NrUninterruptible)) + i-- + dAtA[i] = 0x20 + } + if m.NrStopped != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.NrStopped)) + i-- + dAtA[i] = 0x18 + } + if m.NrRunning != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.NrRunning)) + i-- + dAtA[i] = 0x10 + } + if m.NrSleeping != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.NrSleeping)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { + offset -= sovMetrics(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Metrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hugetlb) > 0 { + for _, e := range m.Hugetlb { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.Pids != nil { + l = m.Pids.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.CPU != nil { + l = 
m.CPU.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Memory != nil { + l = m.Memory.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Blkio != nil { + l = m.Blkio.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Rdma != nil { + l = m.Rdma.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if len(m.Network) > 0 { + for _, e := range m.Network { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.CgroupStats != nil { + l = m.CgroupStats.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.MemoryOomControl != nil { + l = m.MemoryOomControl.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HugetlbStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Usage != 0 { + n += 1 + sovMetrics(uint64(m.Usage)) + } + if m.Max != 0 { + n += 1 + sovMetrics(uint64(m.Max)) + } + if m.Failcnt != 0 { + n += 1 + sovMetrics(uint64(m.Failcnt)) + } + l = len(m.Pagesize) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PidsStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Current != 0 { + n += 1 + sovMetrics(uint64(m.Current)) + } + if m.Limit != 0 { + n += 1 + sovMetrics(uint64(m.Limit)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CPUStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Usage != nil { + l = m.Usage.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Throttling != nil { + l = m.Throttling.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CPUUsage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Total != 0 { + n += 1 + sovMetrics(uint64(m.Total)) + } + if m.Kernel != 0 { + n += 1 + sovMetrics(uint64(m.Kernel)) + } + if m.User != 0 { + n += 1 + sovMetrics(uint64(m.User)) + } + if len(m.PerCPU) > 0 { + l = 0 + for _, e := range m.PerCPU { + l += sovMetrics(uint64(e)) + } + n += 1 + sovMetrics(uint64(l)) + l + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Throttle) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Periods != 0 { + n += 1 + sovMetrics(uint64(m.Periods)) + } + if m.ThrottledPeriods != 0 { + n += 1 + sovMetrics(uint64(m.ThrottledPeriods)) + } + if m.ThrottledTime != 0 { + n += 1 + sovMetrics(uint64(m.ThrottledTime)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MemoryStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cache != 0 { + n += 1 + sovMetrics(uint64(m.Cache)) + } + if m.RSS != 0 { + n += 1 + sovMetrics(uint64(m.RSS)) + } + if m.RSSHuge != 0 { + n += 1 + sovMetrics(uint64(m.RSSHuge)) + } + if m.MappedFile != 0 { + n += 1 + sovMetrics(uint64(m.MappedFile)) + } + if m.Dirty != 0 { + n += 1 + sovMetrics(uint64(m.Dirty)) + } + if m.Writeback != 0 { + n += 1 + sovMetrics(uint64(m.Writeback)) + } + if m.PgPgIn != 0 { + n += 1 + sovMetrics(uint64(m.PgPgIn)) + } + if m.PgPgOut != 0 { + n += 1 + sovMetrics(uint64(m.PgPgOut)) + } + if m.PgFault != 0 { + n += 1 + sovMetrics(uint64(m.PgFault)) + } + if m.PgMajFault != 0 { + n += 1 + sovMetrics(uint64(m.PgMajFault)) + } + if m.InactiveAnon != 0 { + n += 1 + sovMetrics(uint64(m.InactiveAnon)) + } + if 
m.ActiveAnon != 0 { + n += 1 + sovMetrics(uint64(m.ActiveAnon)) + } + if m.InactiveFile != 0 { + n += 1 + sovMetrics(uint64(m.InactiveFile)) + } + if m.ActiveFile != 0 { + n += 1 + sovMetrics(uint64(m.ActiveFile)) + } + if m.Unevictable != 0 { + n += 1 + sovMetrics(uint64(m.Unevictable)) + } + if m.HierarchicalMemoryLimit != 0 { + n += 2 + sovMetrics(uint64(m.HierarchicalMemoryLimit)) + } + if m.HierarchicalSwapLimit != 0 { + n += 2 + sovMetrics(uint64(m.HierarchicalSwapLimit)) + } + if m.TotalCache != 0 { + n += 2 + sovMetrics(uint64(m.TotalCache)) + } + if m.TotalRSS != 0 { + n += 2 + sovMetrics(uint64(m.TotalRSS)) + } + if m.TotalRSSHuge != 0 { + n += 2 + sovMetrics(uint64(m.TotalRSSHuge)) + } + if m.TotalMappedFile != 0 { + n += 2 + sovMetrics(uint64(m.TotalMappedFile)) + } + if m.TotalDirty != 0 { + n += 2 + sovMetrics(uint64(m.TotalDirty)) + } + if m.TotalWriteback != 0 { + n += 2 + sovMetrics(uint64(m.TotalWriteback)) + } + if m.TotalPgPgIn != 0 { + n += 2 + sovMetrics(uint64(m.TotalPgPgIn)) + } + if m.TotalPgPgOut != 0 { + n += 2 + sovMetrics(uint64(m.TotalPgPgOut)) + } + if m.TotalPgFault != 0 { + n += 2 + sovMetrics(uint64(m.TotalPgFault)) + } + if m.TotalPgMajFault != 0 { + n += 2 + sovMetrics(uint64(m.TotalPgMajFault)) + } + if m.TotalInactiveAnon != 0 { + n += 2 + sovMetrics(uint64(m.TotalInactiveAnon)) + } + if m.TotalActiveAnon != 0 { + n += 2 + sovMetrics(uint64(m.TotalActiveAnon)) + } + if m.TotalInactiveFile != 0 { + n += 2 + sovMetrics(uint64(m.TotalInactiveFile)) + } + if m.TotalActiveFile != 0 { + n += 2 + sovMetrics(uint64(m.TotalActiveFile)) + } + if m.TotalUnevictable != 0 { + n += 2 + sovMetrics(uint64(m.TotalUnevictable)) + } + if m.Usage != nil { + l = m.Usage.Size() + n += 2 + l + sovMetrics(uint64(l)) + } + if m.Swap != nil { + l = m.Swap.Size() + n += 2 + l + sovMetrics(uint64(l)) + } + if m.Kernel != nil { + l = m.Kernel.Size() + n += 2 + l + sovMetrics(uint64(l)) + } + if m.KernelTCP != nil { + l = m.KernelTCP.Size() + n += 2 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MemoryEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Limit != 0 { + n += 1 + sovMetrics(uint64(m.Limit)) + } + if m.Usage != 0 { + n += 1 + sovMetrics(uint64(m.Usage)) + } + if m.Max != 0 { + n += 1 + sovMetrics(uint64(m.Max)) + } + if m.Failcnt != 0 { + n += 1 + sovMetrics(uint64(m.Failcnt)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MemoryOomControl) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OomKillDisable != 0 { + n += 1 + sovMetrics(uint64(m.OomKillDisable)) + } + if m.UnderOom != 0 { + n += 1 + sovMetrics(uint64(m.UnderOom)) + } + if m.OomKill != 0 { + n += 1 + sovMetrics(uint64(m.OomKill)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BlkIOStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.IoServiceBytesRecursive) > 0 { + for _, e := range m.IoServiceBytesRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoServicedRecursive) > 0 { + for _, e := range m.IoServicedRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoQueuedRecursive) > 0 { + for _, e := range m.IoQueuedRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoServiceTimeRecursive) > 0 { + for _, e := range m.IoServiceTimeRecursive { + l = e.Size() 
+ n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoWaitTimeRecursive) > 0 { + for _, e := range m.IoWaitTimeRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoMergedRecursive) > 0 { + for _, e := range m.IoMergedRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoTimeRecursive) > 0 { + for _, e := range m.IoTimeRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.SectorsRecursive) > 0 { + for _, e := range m.SectorsRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BlkIOEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Op) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + l = len(m.Device) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Major != 0 { + n += 1 + sovMetrics(uint64(m.Major)) + } + if m.Minor != 0 { + n += 1 + sovMetrics(uint64(m.Minor)) + } + if m.Value != 0 { + n += 1 + sovMetrics(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RdmaStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Current) > 0 { + for _, e := range m.Current { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.Limit) > 0 { + for _, e := range m.Limit { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RdmaEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Device) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.HcaHandles != 0 { + n += 1 + sovMetrics(uint64(m.HcaHandles)) + } + if m.HcaObjects != 0 { + n += 1 + sovMetrics(uint64(m.HcaObjects)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *NetworkStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.RxBytes != 0 { + n += 1 + sovMetrics(uint64(m.RxBytes)) + } + if m.RxPackets != 0 { + n += 1 + sovMetrics(uint64(m.RxPackets)) + } + if m.RxErrors != 0 { + n += 1 + sovMetrics(uint64(m.RxErrors)) + } + if m.RxDropped != 0 { + n += 1 + sovMetrics(uint64(m.RxDropped)) + } + if m.TxBytes != 0 { + n += 1 + sovMetrics(uint64(m.TxBytes)) + } + if m.TxPackets != 0 { + n += 1 + sovMetrics(uint64(m.TxPackets)) + } + if m.TxErrors != 0 { + n += 1 + sovMetrics(uint64(m.TxErrors)) + } + if m.TxDropped != 0 { + n += 1 + sovMetrics(uint64(m.TxDropped)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CgroupStats) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NrSleeping != 0 { + n += 1 + sovMetrics(uint64(m.NrSleeping)) + } + if m.NrRunning != 0 { + n += 1 + sovMetrics(uint64(m.NrRunning)) + } + if m.NrStopped != 0 { + n += 1 + sovMetrics(uint64(m.NrStopped)) + } + if m.NrUninterruptible != 0 { + n += 1 + sovMetrics(uint64(m.NrUninterruptible)) + } + if m.NrIoWait != 0 { + n += 1 + sovMetrics(uint64(m.NrIoWait)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovMetrics(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMetrics(x uint64) (n int) { + return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Metrics) String() string 
{ + if this == nil { + return "nil" + } + repeatedStringForHugetlb := "[]*HugetlbStat{" + for _, f := range this.Hugetlb { + repeatedStringForHugetlb += strings.Replace(f.String(), "HugetlbStat", "HugetlbStat", 1) + "," + } + repeatedStringForHugetlb += "}" + repeatedStringForNetwork := "[]*NetworkStat{" + for _, f := range this.Network { + repeatedStringForNetwork += strings.Replace(f.String(), "NetworkStat", "NetworkStat", 1) + "," + } + repeatedStringForNetwork += "}" + s := strings.Join([]string{`&Metrics{`, + `Hugetlb:` + repeatedStringForHugetlb + `,`, + `Pids:` + strings.Replace(this.Pids.String(), "PidsStat", "PidsStat", 1) + `,`, + `CPU:` + strings.Replace(this.CPU.String(), "CPUStat", "CPUStat", 1) + `,`, + `Memory:` + strings.Replace(this.Memory.String(), "MemoryStat", "MemoryStat", 1) + `,`, + `Blkio:` + strings.Replace(this.Blkio.String(), "BlkIOStat", "BlkIOStat", 1) + `,`, + `Rdma:` + strings.Replace(this.Rdma.String(), "RdmaStat", "RdmaStat", 1) + `,`, + `Network:` + repeatedStringForNetwork + `,`, + `CgroupStats:` + strings.Replace(this.CgroupStats.String(), "CgroupStats", "CgroupStats", 1) + `,`, + `MemoryOomControl:` + strings.Replace(this.MemoryOomControl.String(), "MemoryOomControl", "MemoryOomControl", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *HugetlbStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HugetlbStat{`, + `Usage:` + fmt.Sprintf("%v", this.Usage) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `Failcnt:` + fmt.Sprintf("%v", this.Failcnt) + `,`, + `Pagesize:` + fmt.Sprintf("%v", this.Pagesize) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *PidsStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PidsStat{`, + `Current:` + fmt.Sprintf("%v", this.Current) + `,`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *CPUStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CPUStat{`, + `Usage:` + strings.Replace(this.Usage.String(), "CPUUsage", "CPUUsage", 1) + `,`, + `Throttling:` + strings.Replace(this.Throttling.String(), "Throttle", "Throttle", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *CPUUsage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CPUUsage{`, + `Total:` + fmt.Sprintf("%v", this.Total) + `,`, + `Kernel:` + fmt.Sprintf("%v", this.Kernel) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `PerCPU:` + fmt.Sprintf("%v", this.PerCPU) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Throttle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Throttle{`, + `Periods:` + fmt.Sprintf("%v", this.Periods) + `,`, + `ThrottledPeriods:` + fmt.Sprintf("%v", this.ThrottledPeriods) + `,`, + `ThrottledTime:` + fmt.Sprintf("%v", this.ThrottledTime) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *MemoryStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MemoryStat{`, + `Cache:` + fmt.Sprintf("%v", this.Cache) + `,`, + `RSS:` + 
fmt.Sprintf("%v", this.RSS) + `,`, + `RSSHuge:` + fmt.Sprintf("%v", this.RSSHuge) + `,`, + `MappedFile:` + fmt.Sprintf("%v", this.MappedFile) + `,`, + `Dirty:` + fmt.Sprintf("%v", this.Dirty) + `,`, + `Writeback:` + fmt.Sprintf("%v", this.Writeback) + `,`, + `PgPgIn:` + fmt.Sprintf("%v", this.PgPgIn) + `,`, + `PgPgOut:` + fmt.Sprintf("%v", this.PgPgOut) + `,`, + `PgFault:` + fmt.Sprintf("%v", this.PgFault) + `,`, + `PgMajFault:` + fmt.Sprintf("%v", this.PgMajFault) + `,`, + `InactiveAnon:` + fmt.Sprintf("%v", this.InactiveAnon) + `,`, + `ActiveAnon:` + fmt.Sprintf("%v", this.ActiveAnon) + `,`, + `InactiveFile:` + fmt.Sprintf("%v", this.InactiveFile) + `,`, + `ActiveFile:` + fmt.Sprintf("%v", this.ActiveFile) + `,`, + `Unevictable:` + fmt.Sprintf("%v", this.Unevictable) + `,`, + `HierarchicalMemoryLimit:` + fmt.Sprintf("%v", this.HierarchicalMemoryLimit) + `,`, + `HierarchicalSwapLimit:` + fmt.Sprintf("%v", this.HierarchicalSwapLimit) + `,`, + `TotalCache:` + fmt.Sprintf("%v", this.TotalCache) + `,`, + `TotalRSS:` + fmt.Sprintf("%v", this.TotalRSS) + `,`, + `TotalRSSHuge:` + fmt.Sprintf("%v", this.TotalRSSHuge) + `,`, + `TotalMappedFile:` + fmt.Sprintf("%v", this.TotalMappedFile) + `,`, + `TotalDirty:` + fmt.Sprintf("%v", this.TotalDirty) + `,`, + `TotalWriteback:` + fmt.Sprintf("%v", this.TotalWriteback) + `,`, + `TotalPgPgIn:` + fmt.Sprintf("%v", this.TotalPgPgIn) + `,`, + `TotalPgPgOut:` + fmt.Sprintf("%v", this.TotalPgPgOut) + `,`, + `TotalPgFault:` + fmt.Sprintf("%v", this.TotalPgFault) + `,`, + `TotalPgMajFault:` + fmt.Sprintf("%v", this.TotalPgMajFault) + `,`, + `TotalInactiveAnon:` + fmt.Sprintf("%v", this.TotalInactiveAnon) + `,`, + `TotalActiveAnon:` + fmt.Sprintf("%v", this.TotalActiveAnon) + `,`, + `TotalInactiveFile:` + fmt.Sprintf("%v", this.TotalInactiveFile) + `,`, + `TotalActiveFile:` + fmt.Sprintf("%v", this.TotalActiveFile) + `,`, + `TotalUnevictable:` + fmt.Sprintf("%v", this.TotalUnevictable) + `,`, + `Usage:` + strings.Replace(this.Usage.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, + `Swap:` + strings.Replace(this.Swap.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, + `Kernel:` + strings.Replace(this.Kernel.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, + `KernelTCP:` + strings.Replace(this.KernelTCP.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *MemoryEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MemoryEntry{`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, + `Usage:` + fmt.Sprintf("%v", this.Usage) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `Failcnt:` + fmt.Sprintf("%v", this.Failcnt) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *MemoryOomControl) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MemoryOomControl{`, + `OomKillDisable:` + fmt.Sprintf("%v", this.OomKillDisable) + `,`, + `UnderOom:` + fmt.Sprintf("%v", this.UnderOom) + `,`, + `OomKill:` + fmt.Sprintf("%v", this.OomKill) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *BlkIOStat) String() string { + if this == nil { + return "nil" + } + repeatedStringForIoServiceBytesRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoServiceBytesRecursive { + repeatedStringForIoServiceBytesRecursive += strings.Replace(f.String(), "BlkIOEntry", 
"BlkIOEntry", 1) + "," + } + repeatedStringForIoServiceBytesRecursive += "}" + repeatedStringForIoServicedRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoServicedRecursive { + repeatedStringForIoServicedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoServicedRecursive += "}" + repeatedStringForIoQueuedRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoQueuedRecursive { + repeatedStringForIoQueuedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoQueuedRecursive += "}" + repeatedStringForIoServiceTimeRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoServiceTimeRecursive { + repeatedStringForIoServiceTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoServiceTimeRecursive += "}" + repeatedStringForIoWaitTimeRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoWaitTimeRecursive { + repeatedStringForIoWaitTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoWaitTimeRecursive += "}" + repeatedStringForIoMergedRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoMergedRecursive { + repeatedStringForIoMergedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoMergedRecursive += "}" + repeatedStringForIoTimeRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoTimeRecursive { + repeatedStringForIoTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoTimeRecursive += "}" + repeatedStringForSectorsRecursive := "[]*BlkIOEntry{" + for _, f := range this.SectorsRecursive { + repeatedStringForSectorsRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForSectorsRecursive += "}" + s := strings.Join([]string{`&BlkIOStat{`, + `IoServiceBytesRecursive:` + repeatedStringForIoServiceBytesRecursive + `,`, + `IoServicedRecursive:` + repeatedStringForIoServicedRecursive + `,`, + `IoQueuedRecursive:` + repeatedStringForIoQueuedRecursive + `,`, + `IoServiceTimeRecursive:` + repeatedStringForIoServiceTimeRecursive + `,`, + `IoWaitTimeRecursive:` + repeatedStringForIoWaitTimeRecursive + `,`, + `IoMergedRecursive:` + repeatedStringForIoMergedRecursive + `,`, + `IoTimeRecursive:` + repeatedStringForIoTimeRecursive + `,`, + `SectorsRecursive:` + repeatedStringForSectorsRecursive + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *BlkIOEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BlkIOEntry{`, + `Op:` + fmt.Sprintf("%v", this.Op) + `,`, + `Device:` + fmt.Sprintf("%v", this.Device) + `,`, + `Major:` + fmt.Sprintf("%v", this.Major) + `,`, + `Minor:` + fmt.Sprintf("%v", this.Minor) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *RdmaStat) String() string { + if this == nil { + return "nil" + } + repeatedStringForCurrent := "[]*RdmaEntry{" + for _, f := range this.Current { + repeatedStringForCurrent += strings.Replace(f.String(), "RdmaEntry", "RdmaEntry", 1) + "," + } + repeatedStringForCurrent += "}" + repeatedStringForLimit := "[]*RdmaEntry{" + for _, f := range this.Limit { + repeatedStringForLimit += strings.Replace(f.String(), "RdmaEntry", "RdmaEntry", 1) + "," + } + repeatedStringForLimit 
+= "}" + s := strings.Join([]string{`&RdmaStat{`, + `Current:` + repeatedStringForCurrent + `,`, + `Limit:` + repeatedStringForLimit + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *RdmaEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RdmaEntry{`, + `Device:` + fmt.Sprintf("%v", this.Device) + `,`, + `HcaHandles:` + fmt.Sprintf("%v", this.HcaHandles) + `,`, + `HcaObjects:` + fmt.Sprintf("%v", this.HcaObjects) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkStat{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `RxBytes:` + fmt.Sprintf("%v", this.RxBytes) + `,`, + `RxPackets:` + fmt.Sprintf("%v", this.RxPackets) + `,`, + `RxErrors:` + fmt.Sprintf("%v", this.RxErrors) + `,`, + `RxDropped:` + fmt.Sprintf("%v", this.RxDropped) + `,`, + `TxBytes:` + fmt.Sprintf("%v", this.TxBytes) + `,`, + `TxPackets:` + fmt.Sprintf("%v", this.TxPackets) + `,`, + `TxErrors:` + fmt.Sprintf("%v", this.TxErrors) + `,`, + `TxDropped:` + fmt.Sprintf("%v", this.TxDropped) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *CgroupStats) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CgroupStats{`, + `NrSleeping:` + fmt.Sprintf("%v", this.NrSleeping) + `,`, + `NrRunning:` + fmt.Sprintf("%v", this.NrRunning) + `,`, + `NrStopped:` + fmt.Sprintf("%v", this.NrStopped) + `,`, + `NrUninterruptible:` + fmt.Sprintf("%v", this.NrUninterruptible) + `,`, + `NrIoWait:` + fmt.Sprintf("%v", this.NrIoWait) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringMetrics(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Metrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hugetlb", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hugetlb = append(m.Hugetlb, &HugetlbStat{}) + if err := m.Hugetlb[len(m.Hugetlb)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Pids", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pids == nil { + m.Pids = &PidsStat{} + } + if err := m.Pids.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CPU", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CPU == nil { + m.CPU = &CPUStat{} + } + if err := m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Memory == nil { + m.Memory = &MemoryStat{} + } + if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blkio", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Blkio == nil { + m.Blkio = &BlkIOStat{} + } + if err := m.Blkio.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rdma", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Rdma == nil { + m.Rdma = &RdmaStat{} + } + if err := m.Rdma.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Network = append(m.Network, &NetworkStat{}) + if err := m.Network[len(m.Network)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CgroupStats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CgroupStats == nil { + m.CgroupStats = &CgroupStats{} + } + if err := m.CgroupStats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryOomControl", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MemoryOomControl == nil { + m.MemoryOomControl = &MemoryOomControl{} + } + if err := m.MemoryOomControl.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HugetlbStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HugetlbStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HugetlbStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + m.Usage = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Usage |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Max |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType) + } + m.Failcnt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Failcnt |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagesize", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pagesize = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PidsStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PidsStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PidsStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + m.Current = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Current |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CPUStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CPUStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CPUStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Usage == nil { + m.Usage = &CPUUsage{} + } + if err := m.Usage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Throttling", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Throttling == nil { + m.Throttling = &Throttle{} + } + if err := m.Throttling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CPUUsage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CPUUsage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CPUUsage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType) + } + m.Kernel = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Kernel |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + m.User = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.User |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PerCPU = append(m.PerCPU, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PerCPU) == 0 { + m.PerCPU = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PerCPU = append(m.PerCPU, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PerCPU", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Throttle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Throttle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Throttle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Periods", wireType) + } + m.Periods = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Periods |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ThrottledPeriods", wireType) + } + m.ThrottledPeriods = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ThrottledPeriods |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ThrottledTime", wireType) + } + m.ThrottledTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ThrottledTime |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemoryStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemoryStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemoryStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType) + } + m.Cache = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Cache |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RSS", wireType) + } + m.RSS = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RSS |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RSSHuge", wireType) + } + m.RSSHuge = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RSSHuge |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MappedFile", wireType) + } + m.MappedFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MappedFile |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Dirty", wireType) + } + m.Dirty = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Dirty |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Writeback", wireType) + } + m.Writeback = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Writeback |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PgPgIn", wireType) + } + m.PgPgIn = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PgPgIn |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PgPgOut", wireType) + } + m.PgPgOut = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PgPgOut |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PgFault", wireType) + } + m.PgFault = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PgFault |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PgMajFault", wireType) + } + m.PgMajFault = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PgMajFault |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InactiveAnon", wireType) + } + m.InactiveAnon = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InactiveAnon |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveAnon", wireType) + } + m.ActiveAnon = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ActiveAnon |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InactiveFile", wireType) + } + m.InactiveFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InactiveFile |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveFile", wireType) + } + m.ActiveFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ActiveFile |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Unevictable", wireType) + } + m.Unevictable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Unevictable |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HierarchicalMemoryLimit", wireType) + } + m.HierarchicalMemoryLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HierarchicalMemoryLimit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HierarchicalSwapLimit", wireType) + } + m.HierarchicalSwapLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HierarchicalSwapLimit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalCache", wireType) + } + m.TotalCache = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalCache |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalRSS", wireType) + } + m.TotalRSS = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalRSS |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 20: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalRSSHuge", wireType) + } + m.TotalRSSHuge = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalRSSHuge |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalMappedFile", wireType) + } + m.TotalMappedFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalMappedFile |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 22: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalDirty", wireType) + } + m.TotalDirty = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalDirty |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 23: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalWriteback", wireType) + } + m.TotalWriteback = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalWriteback |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 24: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalPgPgIn", wireType) + } + m.TotalPgPgIn = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalPgPgIn |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 25: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalPgPgOut", wireType) + } + m.TotalPgPgOut = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalPgPgOut |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 26: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalPgFault", wireType) + } + m.TotalPgFault = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.TotalPgFault |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 27: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalPgMajFault", wireType) + } + m.TotalPgMajFault = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalPgMajFault |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 28: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalInactiveAnon", wireType) + } + m.TotalInactiveAnon = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalInactiveAnon |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 29: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalActiveAnon", wireType) + } + m.TotalActiveAnon = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalActiveAnon |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 30: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalInactiveFile", wireType) + } + m.TotalInactiveFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalInactiveFile |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 31: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalActiveFile", wireType) + } + m.TotalActiveFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalActiveFile |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 32: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalUnevictable", wireType) + } + m.TotalUnevictable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalUnevictable |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 33: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Usage == nil { + m.Usage = &MemoryEntry{} + } + if err := m.Usage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 34: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Swap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Swap == nil { + m.Swap = &MemoryEntry{} + } + if err := m.Swap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 35: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Kernel == nil { + m.Kernel = &MemoryEntry{} + } + if err := m.Kernel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 36: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KernelTCP", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KernelTCP == nil { + m.KernelTCP = &MemoryEntry{} + } + if err := m.KernelTCP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemoryEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemoryEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemoryEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + m.Usage = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Usage |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Max |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType) + } + m.Failcnt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Failcnt |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemoryOomControl) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemoryOomControl: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemoryOomControl: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OomKillDisable", wireType) + } + m.OomKillDisable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OomKillDisable |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnderOom", wireType) + } + m.UnderOom = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnderOom |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OomKill", wireType) + } + m.OomKill = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OomKill |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlkIOStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlkIOStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlkIOStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoServiceBytesRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoServiceBytesRecursive = append(m.IoServiceBytesRecursive, &BlkIOEntry{}) + if err := m.IoServiceBytesRecursive[len(m.IoServiceBytesRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoServicedRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoServicedRecursive = append(m.IoServicedRecursive, &BlkIOEntry{}) + if err := m.IoServicedRecursive[len(m.IoServicedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoQueuedRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoQueuedRecursive = append(m.IoQueuedRecursive, &BlkIOEntry{}) + if err := m.IoQueuedRecursive[len(m.IoQueuedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoServiceTimeRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoServiceTimeRecursive = append(m.IoServiceTimeRecursive, &BlkIOEntry{}) + if err := m.IoServiceTimeRecursive[len(m.IoServiceTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoWaitTimeRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoWaitTimeRecursive = append(m.IoWaitTimeRecursive, &BlkIOEntry{}) + if err := m.IoWaitTimeRecursive[len(m.IoWaitTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoMergedRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoMergedRecursive = append(m.IoMergedRecursive, &BlkIOEntry{}) + if err := m.IoMergedRecursive[len(m.IoMergedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoTimeRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoTimeRecursive = append(m.IoTimeRecursive, &BlkIOEntry{}) + if err := m.IoTimeRecursive[len(m.IoTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SectorsRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SectorsRecursive = append(m.SectorsRecursive, &BlkIOEntry{}) + if err := m.SectorsRecursive[len(m.SectorsRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlkIOEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlkIOEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlkIOEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Op = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Device = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Major", wireType) + } + m.Major = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Major |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Minor", wireType) + } + m.Minor = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Minor |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RdmaStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RdmaStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RdmaStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Current = append(m.Current, &RdmaEntry{}) + if err := m.Current[len(m.Current)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Limit = append(m.Limit, &RdmaEntry{}) + if err := m.Limit[len(m.Limit)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RdmaEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RdmaEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RdmaEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Device = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HcaHandles", wireType) + } + m.HcaHandles = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HcaHandles |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HcaObjects", wireType) + } + m.HcaObjects = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HcaObjects |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RxBytes", wireType) + } + m.RxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RxBytes |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RxPackets", wireType) + } + m.RxPackets = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RxPackets |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RxErrors", wireType) + } + m.RxErrors = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RxErrors |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RxDropped", wireType) + } + m.RxDropped = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RxDropped |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TxBytes", wireType) + } + m.TxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxBytes |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TxPackets", wireType) + } + m.TxPackets = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxPackets |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TxErrors", wireType) + } + m.TxErrors = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxErrors |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TxDropped", wireType) + } + m.TxDropped = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxDropped |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CgroupStats) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CgroupStats: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CgroupStats: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrSleeping", wireType) + } + m.NrSleeping = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrSleeping |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrRunning", wireType) + } + m.NrRunning = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrRunning |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrStopped", wireType) + } + m.NrStopped = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrStopped |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrUninterruptible", wireType) + } + m.NrUninterruptible = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrUninterruptible |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 
5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrIoWait", wireType) + } + m.NrIoWait = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrIoWait |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMetrics(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMetrics + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMetrics + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMetrics + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group") +) diff --git a/ecs-agent/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt b/ecs-agent/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt new file mode 100644 index 00000000000..e476cea6412 --- /dev/null +++ b/ecs-agent/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt @@ -0,0 +1,790 @@ +file { + name: "github.com/containerd/cgroups/stats/v1/metrics.proto" + package: "io.containerd.cgroups.v1" + dependency: "gogoproto/gogo.proto" + message_type { + name: "Metrics" + field { + name: "hugetlb" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.HugetlbStat" + json_name: "hugetlb" + } + field { + name: "pids" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.PidsStat" + json_name: "pids" + } + field { + name: "cpu" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.CPUStat" + options { + 65004: "CPU" + } + json_name: "cpu" + } + field { + name: "memory" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryStat" + json_name: 
"memory" + } + field { + name: "blkio" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOStat" + json_name: "blkio" + } + field { + name: "rdma" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.RdmaStat" + json_name: "rdma" + } + field { + name: "network" + number: 7 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.NetworkStat" + json_name: "network" + } + field { + name: "cgroup_stats" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.CgroupStats" + json_name: "cgroupStats" + } + field { + name: "memory_oom_control" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryOomControl" + json_name: "memoryOomControl" + } + } + message_type { + name: "HugetlbStat" + field { + name: "usage" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "usage" + } + field { + name: "max" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "max" + } + field { + name: "failcnt" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "failcnt" + } + field { + name: "pagesize" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "pagesize" + } + } + message_type { + name: "PidsStat" + field { + name: "current" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "current" + } + field { + name: "limit" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "limit" + } + } + message_type { + name: "CPUStat" + field { + name: "usage" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.CPUUsage" + json_name: "usage" + } + field { + name: "throttling" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.Throttle" + json_name: "throttling" + } + } + message_type { + name: "CPUUsage" + field { + name: "total" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "total" + } + field { + name: "kernel" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "kernel" + } + field { + name: "user" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "user" + } + field { + name: "per_cpu" + number: 4 + label: LABEL_REPEATED + type: TYPE_UINT64 + options { + 65004: "PerCPU" + } + json_name: "perCpu" + } + } + message_type { + name: "Throttle" + field { + name: "periods" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "periods" + } + field { + name: "throttled_periods" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "throttledPeriods" + } + field { + name: "throttled_time" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "throttledTime" + } + } + message_type { + name: "MemoryStat" + field { + name: "cache" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "cache" + } + field { + name: "rss" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65004: "RSS" + } + json_name: "rss" + } + field { + name: "rss_huge" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65004: "RSSHuge" + } + json_name: "rssHuge" + } + field { + name: "mapped_file" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "mappedFile" + } + field { + name: "dirty" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "dirty" + } + field { + name: "writeback" + 
number: 6 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "writeback" + } + field { + name: "pg_pg_in" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "pgPgIn" + } + field { + name: "pg_pg_out" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "pgPgOut" + } + field { + name: "pg_fault" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "pgFault" + } + field { + name: "pg_maj_fault" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "pgMajFault" + } + field { + name: "inactive_anon" + number: 11 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "inactiveAnon" + } + field { + name: "active_anon" + number: 12 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "activeAnon" + } + field { + name: "inactive_file" + number: 13 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "inactiveFile" + } + field { + name: "active_file" + number: 14 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "activeFile" + } + field { + name: "unevictable" + number: 15 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "unevictable" + } + field { + name: "hierarchical_memory_limit" + number: 16 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "hierarchicalMemoryLimit" + } + field { + name: "hierarchical_swap_limit" + number: 17 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "hierarchicalSwapLimit" + } + field { + name: "total_cache" + number: 18 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalCache" + } + field { + name: "total_rss" + number: 19 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65004: "TotalRSS" + } + json_name: "totalRss" + } + field { + name: "total_rss_huge" + number: 20 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65004: "TotalRSSHuge" + } + json_name: "totalRssHuge" + } + field { + name: "total_mapped_file" + number: 21 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalMappedFile" + } + field { + name: "total_dirty" + number: 22 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalDirty" + } + field { + name: "total_writeback" + number: 23 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalWriteback" + } + field { + name: "total_pg_pg_in" + number: 24 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalPgPgIn" + } + field { + name: "total_pg_pg_out" + number: 25 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalPgPgOut" + } + field { + name: "total_pg_fault" + number: 26 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalPgFault" + } + field { + name: "total_pg_maj_fault" + number: 27 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalPgMajFault" + } + field { + name: "total_inactive_anon" + number: 28 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalInactiveAnon" + } + field { + name: "total_active_anon" + number: 29 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalActiveAnon" + } + field { + name: "total_inactive_file" + number: 30 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalInactiveFile" + } + field { + name: "total_active_file" + number: 31 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalActiveFile" + } + field { + name: "total_unevictable" + number: 32 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalUnevictable" + } + field { + name: "usage" + number: 33 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryEntry" + json_name: 
"usage" + } + field { + name: "swap" + number: 34 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryEntry" + json_name: "swap" + } + field { + name: "kernel" + number: 35 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryEntry" + json_name: "kernel" + } + field { + name: "kernel_tcp" + number: 36 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryEntry" + options { + 65004: "KernelTCP" + } + json_name: "kernelTcp" + } + } + message_type { + name: "MemoryEntry" + field { + name: "limit" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "limit" + } + field { + name: "usage" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "usage" + } + field { + name: "max" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "max" + } + field { + name: "failcnt" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "failcnt" + } + } + message_type { + name: "MemoryOomControl" + field { + name: "oom_kill_disable" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "oomKillDisable" + } + field { + name: "under_oom" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "underOom" + } + field { + name: "oom_kill" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "oomKill" + } + } + message_type { + name: "BlkIOStat" + field { + name: "io_service_bytes_recursive" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioServiceBytesRecursive" + } + field { + name: "io_serviced_recursive" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioServicedRecursive" + } + field { + name: "io_queued_recursive" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioQueuedRecursive" + } + field { + name: "io_service_time_recursive" + number: 4 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioServiceTimeRecursive" + } + field { + name: "io_wait_time_recursive" + number: 5 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioWaitTimeRecursive" + } + field { + name: "io_merged_recursive" + number: 6 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioMergedRecursive" + } + field { + name: "io_time_recursive" + number: 7 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioTimeRecursive" + } + field { + name: "sectors_recursive" + number: 8 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "sectorsRecursive" + } + } + message_type { + name: "BlkIOEntry" + field { + name: "op" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "op" + } + field { + name: "device" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "device" + } + field { + name: "major" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "major" + } + field { + name: "minor" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "minor" + } + field { + name: "value" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "value" + } + } + 
message_type { + name: "RdmaStat" + field { + name: "current" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.RdmaEntry" + json_name: "current" + } + field { + name: "limit" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.RdmaEntry" + json_name: "limit" + } + } + message_type { + name: "RdmaEntry" + field { + name: "device" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "device" + } + field { + name: "hca_handles" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT32 + json_name: "hcaHandles" + } + field { + name: "hca_objects" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT32 + json_name: "hcaObjects" + } + } + message_type { + name: "NetworkStat" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "rx_bytes" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "rxBytes" + } + field { + name: "rx_packets" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "rxPackets" + } + field { + name: "rx_errors" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "rxErrors" + } + field { + name: "rx_dropped" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "rxDropped" + } + field { + name: "tx_bytes" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "txBytes" + } + field { + name: "tx_packets" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "txPackets" + } + field { + name: "tx_errors" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "txErrors" + } + field { + name: "tx_dropped" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "txDropped" + } + } + message_type { + name: "CgroupStats" + field { + name: "nr_sleeping" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrSleeping" + } + field { + name: "nr_running" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrRunning" + } + field { + name: "nr_stopped" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrStopped" + } + field { + name: "nr_uninterruptible" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrUninterruptible" + } + field { + name: "nr_io_wait" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrIoWait" + } + } + syntax: "proto3" +} diff --git a/ecs-agent/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto b/ecs-agent/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto new file mode 100644 index 00000000000..b3f6cc37d8c --- /dev/null +++ b/ecs-agent/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto @@ -0,0 +1,158 @@ +syntax = "proto3"; + +package io.containerd.cgroups.v1; + +import "gogoproto/gogo.proto"; + +message Metrics { + repeated HugetlbStat hugetlb = 1; + PidsStat pids = 2; + CPUStat cpu = 3 [(gogoproto.customname) = "CPU"]; + MemoryStat memory = 4; + BlkIOStat blkio = 5; + RdmaStat rdma = 6; + repeated NetworkStat network = 7; + CgroupStats cgroup_stats = 8; + MemoryOomControl memory_oom_control = 9; +} + +message HugetlbStat { + uint64 usage = 1; + uint64 max = 2; + uint64 failcnt = 3; + string pagesize = 4; +} + +message PidsStat { + uint64 current = 1; + uint64 limit = 2; +} + +message CPUStat { + CPUUsage usage = 1; + Throttle throttling = 2; +} + +message CPUUsage { + // values in nanoseconds + uint64 total = 1; + uint64 kernel = 2; + uint64 user = 3; + 
repeated uint64 per_cpu = 4 [(gogoproto.customname) = "PerCPU"]; + +} + +message Throttle { + uint64 periods = 1; + uint64 throttled_periods = 2; + uint64 throttled_time = 3; +} + +message MemoryStat { + uint64 cache = 1; + uint64 rss = 2 [(gogoproto.customname) = "RSS"]; + uint64 rss_huge = 3 [(gogoproto.customname) = "RSSHuge"]; + uint64 mapped_file = 4; + uint64 dirty = 5; + uint64 writeback = 6; + uint64 pg_pg_in = 7; + uint64 pg_pg_out = 8; + uint64 pg_fault = 9; + uint64 pg_maj_fault = 10; + uint64 inactive_anon = 11; + uint64 active_anon = 12; + uint64 inactive_file = 13; + uint64 active_file = 14; + uint64 unevictable = 15; + uint64 hierarchical_memory_limit = 16; + uint64 hierarchical_swap_limit = 17; + uint64 total_cache = 18; + uint64 total_rss = 19 [(gogoproto.customname) = "TotalRSS"]; + uint64 total_rss_huge = 20 [(gogoproto.customname) = "TotalRSSHuge"]; + uint64 total_mapped_file = 21; + uint64 total_dirty = 22; + uint64 total_writeback = 23; + uint64 total_pg_pg_in = 24; + uint64 total_pg_pg_out = 25; + uint64 total_pg_fault = 26; + uint64 total_pg_maj_fault = 27; + uint64 total_inactive_anon = 28; + uint64 total_active_anon = 29; + uint64 total_inactive_file = 30; + uint64 total_active_file = 31; + uint64 total_unevictable = 32; + MemoryEntry usage = 33; + MemoryEntry swap = 34; + MemoryEntry kernel = 35; + MemoryEntry kernel_tcp = 36 [(gogoproto.customname) = "KernelTCP"]; + +} + +message MemoryEntry { + uint64 limit = 1; + uint64 usage = 2; + uint64 max = 3; + uint64 failcnt = 4; +} + +message MemoryOomControl { + uint64 oom_kill_disable = 1; + uint64 under_oom = 2; + uint64 oom_kill = 3; +} + +message BlkIOStat { + repeated BlkIOEntry io_service_bytes_recursive = 1; + repeated BlkIOEntry io_serviced_recursive = 2; + repeated BlkIOEntry io_queued_recursive = 3; + repeated BlkIOEntry io_service_time_recursive = 4; + repeated BlkIOEntry io_wait_time_recursive = 5; + repeated BlkIOEntry io_merged_recursive = 6; + repeated BlkIOEntry io_time_recursive = 7; + repeated BlkIOEntry sectors_recursive = 8; +} + +message BlkIOEntry { + string op = 1; + string device = 2; + uint64 major = 3; + uint64 minor = 4; + uint64 value = 5; +} + +message RdmaStat { + repeated RdmaEntry current = 1; + repeated RdmaEntry limit = 2; +} + +message RdmaEntry { + string device = 1; + uint32 hca_handles = 2; + uint32 hca_objects = 3; +} + +message NetworkStat { + string name = 1; + uint64 rx_bytes = 2; + uint64 rx_packets = 3; + uint64 rx_errors = 4; + uint64 rx_dropped = 5; + uint64 tx_bytes = 6; + uint64 tx_packets = 7; + uint64 tx_errors = 8; + uint64 tx_dropped = 9; +} + +// CgroupStats exports per-cgroup statistics. +message CgroupStats { + // number of tasks sleeping + uint64 nr_sleeping = 1; + // number of tasks running + uint64 nr_running = 2; + // number of tasks in stopped state + uint64 nr_stopped = 3; + // number of tasks in uninterruptible state + uint64 nr_uninterruptible = 4; + // number of tasks waiting on IO + uint64 nr_io_wait = 5; +} diff --git a/ecs-agent/vendor/github.com/containernetworking/cni/LICENSE b/ecs-agent/vendor/github.com/containernetworking/cni/LICENSE new file mode 100644 index 00000000000..8f71f43fee3 --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/cni/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
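For orientation, the sketch below is illustrative only and not part of the vendored files: it shows how the gogo-generated Go types for the cgroups v1 metrics.proto added above (just before this license) are typically consumed. It assumes the generated package follows the usual gogo pattern of per-message Marshal/Unmarshal methods and that the (gogoproto.customname) options surface as the Go field names CPU, RSS and TotalRSS; the sample values are made up.

package main

import (
    "fmt"
    "log"

    v1 "github.com/containerd/cgroups/stats/v1"
)

func main() {
    // Populate a Metrics message; the customname options in the proto above
    // surface here as the Go field names CPU, RSS and TotalRSS.
    m := &v1.Metrics{
        CPU: &v1.CPUStat{
            Usage: &v1.CPUUsage{Total: 1000000, Kernel: 250000, User: 750000},
        },
        Memory: &v1.MemoryStat{RSS: 4096, TotalRSS: 8192},
    }

    // gogo-generated messages carry their own Marshal/Unmarshal implementations.
    data, err := m.Marshal()
    if err != nil {
        log.Fatal(err)
    }

    var decoded v1.Metrics
    if err := decoded.Unmarshal(data); err != nil {
        log.Fatal(err)
    }
    fmt.Println("total cpu (ns):", decoded.CPU.Usage.Total)
}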
+ diff --git a/ecs-agent/vendor/github.com/containernetworking/cni/libcni/api.go b/ecs-agent/vendor/github.com/containernetworking/cni/libcni/api.go new file mode 100644 index 00000000000..0d82a2dd3c6 --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/cni/libcni/api.go @@ -0,0 +1,679 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package libcni + +// Note this is the actual implementation of the CNI specification, which +// is reflected in the https://github.com/containernetworking/cni/blob/master/SPEC.md file +// it is typically bundled into runtime providers (i.e. containerd or cri-o would use this +// before calling runc or hcsshim). It is also bundled into CNI providers as well, for example, +// to add an IP to a container, to parse the configuration of the CNI and so on. + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/containernetworking/cni/pkg/invoke" + "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/types/create" + "github.com/containernetworking/cni/pkg/utils" + "github.com/containernetworking/cni/pkg/version" +) + +var ( + CacheDir = "/var/lib/cni" +) + +const ( + CNICacheV1 = "cniCacheV1" +) + +// A RuntimeConf holds the arguments to one invocation of a CNI plugin +// excepting the network configuration, with the nested exception that +// the `runtimeConfig` from the network configuration is included +// here. +type RuntimeConf struct { + ContainerID string + NetNS string + IfName string + Args [][2]string + // A dictionary of capability-specific data passed by the runtime + // to plugins as top-level keys in the 'runtimeConfig' dictionary + // of the plugin's stdin data. libcni will ensure that only keys + // in this map which match the capabilities of the plugin are passed + // to the plugin + CapabilityArgs map[string]interface{} + + // DEPRECATED. Will be removed in a future release. 
+ CacheDir string +} + +type NetworkConfig struct { + Network *types.NetConf + Bytes []byte +} + +type NetworkConfigList struct { + Name string + CNIVersion string + DisableCheck bool + Plugins []*NetworkConfig + Bytes []byte +} + +type CNI interface { + AddNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) (types.Result, error) + CheckNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error + DelNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error + GetNetworkListCachedResult(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error) + GetNetworkListCachedConfig(net *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error) + + AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) + CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error + DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error + GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) + GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) + + ValidateNetworkList(ctx context.Context, net *NetworkConfigList) ([]string, error) + ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) +} + +type CNIConfig struct { + Path []string + exec invoke.Exec + cacheDir string +} + +// CNIConfig implements the CNI interface +var _ CNI = &CNIConfig{} + +// NewCNIConfig returns a new CNIConfig object that will search for plugins +// in the given paths and use the given exec interface to run those plugins, +// or if the exec interface is not given, will use a default exec handler. +func NewCNIConfig(path []string, exec invoke.Exec) *CNIConfig { + return NewCNIConfigWithCacheDir(path, "", exec) +} + +// NewCNIConfigWithCacheDir returns a new CNIConfig object that will search for plugins +// in the given paths use the given exec interface to run those plugins, +// or if the exec interface is not given, will use a default exec handler. +// The given cache directory will be used for temporary data storage when needed. +func NewCNIConfigWithCacheDir(path []string, cacheDir string, exec invoke.Exec) *CNIConfig { + return &CNIConfig{ + Path: path, + cacheDir: cacheDir, + exec: exec, + } +} + +func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (*NetworkConfig, error) { + var err error + + inject := map[string]interface{}{ + "name": name, + "cniVersion": cniVersion, + } + // Add previous plugin result + if prevResult != nil { + inject["prevResult"] = prevResult + } + + // Ensure every config uses the same name and version + orig, err = InjectConf(orig, inject) + if err != nil { + return nil, err + } + + return injectRuntimeConfig(orig, rt) +} + +// This function takes a libcni RuntimeConf structure and injects values into +// a "runtimeConfig" dictionary in the CNI network configuration JSON that +// will be passed to the plugin on stdin. +// +// Only "capabilities arguments" passed by the runtime are currently injected. +// These capabilities arguments are filtered through the plugin's advertised +// capabilities from its config JSON, and any keys in the CapabilityArgs +// matching plugin capabilities are added to the "runtimeConfig" dictionary +// sent to the plugin via JSON on stdin. 
For example, if the plugin's +// capabilities include "portMappings", and the CapabilityArgs map includes a +// "portMappings" key, that key and its value are added to the "runtimeConfig" +// dictionary to be passed to the plugin's stdin. +func injectRuntimeConfig(orig *NetworkConfig, rt *RuntimeConf) (*NetworkConfig, error) { + var err error + + rc := make(map[string]interface{}) + for capability, supported := range orig.Network.Capabilities { + if !supported { + continue + } + if data, ok := rt.CapabilityArgs[capability]; ok { + rc[capability] = data + } + } + + if len(rc) > 0 { + orig, err = InjectConf(orig, map[string]interface{}{"runtimeConfig": rc}) + if err != nil { + return nil, err + } + } + + return orig, nil +} + +// ensure we have a usable exec if the CNIConfig was not given one +func (c *CNIConfig) ensureExec() invoke.Exec { + if c.exec == nil { + c.exec = &invoke.DefaultExec{ + RawExec: &invoke.RawExec{Stderr: os.Stderr}, + PluginDecoder: version.PluginDecoder{}, + } + } + return c.exec +} + +type cachedInfo struct { + Kind string `json:"kind"` + ContainerID string `json:"containerId"` + Config []byte `json:"config"` + IfName string `json:"ifName"` + NetworkName string `json:"networkName"` + CniArgs [][2]string `json:"cniArgs,omitempty"` + CapabilityArgs map[string]interface{} `json:"capabilityArgs,omitempty"` + RawResult map[string]interface{} `json:"result,omitempty"` + Result types.Result `json:"-"` +} + +// getCacheDir returns the cache directory in this order: +// 1) global cacheDir from CNIConfig object +// 2) deprecated cacheDir from RuntimeConf object +// 3) fall back to default cache directory +func (c *CNIConfig) getCacheDir(rt *RuntimeConf) string { + if c.cacheDir != "" { + return c.cacheDir + } + if rt.CacheDir != "" { + return rt.CacheDir + } + return CacheDir +} + +func (c *CNIConfig) getCacheFilePath(netName string, rt *RuntimeConf) (string, error) { + if netName == "" || rt.ContainerID == "" || rt.IfName == "" { + return "", fmt.Errorf("cache file path requires network name (%q), container ID (%q), and interface name (%q)", netName, rt.ContainerID, rt.IfName) + } + return filepath.Join(c.getCacheDir(rt), "results", fmt.Sprintf("%s-%s-%s", netName, rt.ContainerID, rt.IfName)), nil +} + +func (c *CNIConfig) cacheAdd(result types.Result, config []byte, netName string, rt *RuntimeConf) error { + cached := cachedInfo{ + Kind: CNICacheV1, + ContainerID: rt.ContainerID, + Config: config, + IfName: rt.IfName, + NetworkName: netName, + CniArgs: rt.Args, + CapabilityArgs: rt.CapabilityArgs, + } + + // We need to get type.Result into cachedInfo as JSON map + // Marshal to []byte, then Unmarshal into cached.RawResult + data, err := json.Marshal(result) + if err != nil { + return err + } + + err = json.Unmarshal(data, &cached.RawResult) + if err != nil { + return err + } + + newBytes, err := json.Marshal(&cached) + if err != nil { + return err + } + + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil { + return err + } + + return ioutil.WriteFile(fname, newBytes, 0600) +} + +func (c *CNIConfig) cacheDel(netName string, rt *RuntimeConf) error { + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + // Ignore error + return nil + } + return os.Remove(fname) +} + +func (c *CNIConfig) getCachedConfig(netName string, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { + var bytes []byte + + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return nil, nil, 
err + } + bytes, err = ioutil.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached result may not exist on-disk + return nil, nil, nil + } + + unmarshaled := cachedInfo{} + if err := json.Unmarshal(bytes, &unmarshaled); err != nil { + return nil, nil, fmt.Errorf("failed to unmarshal cached network %q config: %w", netName, err) + } + if unmarshaled.Kind != CNICacheV1 { + return nil, nil, fmt.Errorf("read cached network %q config has wrong kind: %v", netName, unmarshaled.Kind) + } + + newRt := *rt + if unmarshaled.CniArgs != nil { + newRt.Args = unmarshaled.CniArgs + } + newRt.CapabilityArgs = unmarshaled.CapabilityArgs + + return unmarshaled.Config, &newRt, nil +} + +func (c *CNIConfig) getLegacyCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) { + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return nil, err + } + data, err := ioutil.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached result may not exist on-disk + return nil, nil + } + + // Load the cached result + result, err := create.CreateFromBytes(data) + if err != nil { + return nil, err + } + + // Convert to the config version to ensure plugins get prevResult + // in the same version as the config. The cached result version + // should match the config version unless the config was changed + // while the container was running. + result, err = result.GetAsVersion(cniVersion) + if err != nil { + return nil, fmt.Errorf("failed to convert cached result to config version %q: %w", cniVersion, err) + } + return result, nil +} + +func (c *CNIConfig) getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) { + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return nil, err + } + fdata, err := ioutil.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached result may not exist on-disk + return nil, nil + } + + cachedInfo := cachedInfo{} + if err := json.Unmarshal(fdata, &cachedInfo); err != nil || cachedInfo.Kind != CNICacheV1 { + return c.getLegacyCachedResult(netName, cniVersion, rt) + } + + newBytes, err := json.Marshal(&cachedInfo.RawResult) + if err != nil { + return nil, fmt.Errorf("failed to marshal cached network %q config: %w", netName, err) + } + + // Load the cached result + result, err := create.CreateFromBytes(newBytes) + if err != nil { + return nil, err + } + + // Convert to the config version to ensure plugins get prevResult + // in the same version as the config. The cached result version + // should match the config version unless the config was changed + // while the container was running. + result, err = result.GetAsVersion(cniVersion) + if err != nil { + return nil, fmt.Errorf("failed to convert cached result to config version %q: %w", cniVersion, err) + } + return result, nil +} + +// GetNetworkListCachedResult returns the cached Result of the previous +// AddNetworkList() operation for a network list, or an error. +func (c *CNIConfig) GetNetworkListCachedResult(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { + return c.getCachedResult(list.Name, list.CNIVersion, rt) +} + +// GetNetworkCachedResult returns the cached Result of the previous +// AddNetwork() operation for a network, or an error. 
+func (c *CNIConfig) GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { + return c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) +} + +// GetNetworkListCachedConfig copies the input RuntimeConf to output +// RuntimeConf with fields updated with info from the cached Config. +func (c *CNIConfig) GetNetworkListCachedConfig(list *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { + return c.getCachedConfig(list.Name, rt) +} + +// GetNetworkCachedConfig copies the input RuntimeConf to output +// RuntimeConf with fields updated with info from the cached Config. +func (c *CNIConfig) GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { + return c.getCachedConfig(net.Network.Name, rt) +} + +func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return nil, err + } + if err := utils.ValidateContainerID(rt.ContainerID); err != nil { + return nil, err + } + if err := utils.ValidateNetworkName(name); err != nil { + return nil, err + } + if err := utils.ValidateInterfaceName(rt.IfName); err != nil { + return nil, err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return nil, err + } + + return invoke.ExecPluginWithResult(ctx, pluginPath, newConf.Bytes, c.args("ADD", rt), c.exec) +} + +// AddNetworkList executes a sequence of plugins with the ADD command +func (c *CNIConfig) AddNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { + var err error + var result types.Result + for _, net := range list.Plugins { + result, err = c.addNetwork(ctx, list.Name, list.CNIVersion, net, result, rt) + if err != nil { + return nil, fmt.Errorf("plugin %s failed (add): %w", pluginDescription(net.Network), err) + } + } + + if err = c.cacheAdd(result, list.Bytes, list.Name, rt); err != nil { + return nil, fmt.Errorf("failed to set network %q cached result: %w", list.Name, err) + } + + return result, nil +} + +func (c *CNIConfig) checkNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return err + } + + return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("CHECK", rt), c.exec) +} + +// CheckNetworkList executes a sequence of plugins with the CHECK command +func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error { + // CHECK was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { + return err + } else if !gtet { + return fmt.Errorf("configuration version %q does not support the CHECK command", list.CNIVersion) + } + + if list.DisableCheck { + return nil + } + + cachedResult, err := c.getCachedResult(list.Name, list.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", list.Name, err) + } + + for _, net := range list.Plugins { + if err := c.checkNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { + return err + } + } + + 
return nil +} + +func (c *CNIConfig) delNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return err + } + + return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("DEL", rt), c.exec) +} + +// DelNetworkList executes a sequence of plugins with the DEL command +func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error { + var cachedResult types.Result + + // Cached result on DEL was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { + return err + } else if gtet { + cachedResult, err = c.getCachedResult(list.Name, list.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", list.Name, err) + } + } + + for i := len(list.Plugins) - 1; i >= 0; i-- { + net := list.Plugins[i] + if err := c.delNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { + return fmt.Errorf("plugin %s failed (delete): %w", pluginDescription(net.Network), err) + } + } + _ = c.cacheDel(list.Name, rt) + + return nil +} + +func pluginDescription(net *types.NetConf) string { + if net == nil { + return "" + } + pluginType := net.Type + out := fmt.Sprintf("type=%q", pluginType) + name := net.Name + if name != "" { + out += fmt.Sprintf(" name=%q", name) + } + return out +} + +// AddNetwork executes the plugin with the ADD command +func (c *CNIConfig) AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { + result, err := c.addNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, nil, rt) + if err != nil { + return nil, err + } + + if err = c.cacheAdd(result, net.Bytes, net.Network.Name, rt); err != nil { + return nil, fmt.Errorf("failed to set network %q cached result: %w", net.Network.Name, err) + } + + return result, nil +} + +// CheckNetwork executes the plugin with the CHECK command +func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error { + // CHECK was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { + return err + } else if !gtet { + return fmt.Errorf("configuration version %q does not support the CHECK command", net.Network.CNIVersion) + } + + cachedResult, err := c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", net.Network.Name, err) + } + return c.checkNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt) +} + +// DelNetwork executes the plugin with the DEL command +func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error { + var cachedResult types.Result + + // Cached result on DEL was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { + return err + } else if gtet { + cachedResult, err = c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", net.Network.Name, err) + } + } + + if err := c.delNetwork(ctx, net.Network.Name, 
net.Network.CNIVersion, net, cachedResult, rt); err != nil { + return err + } + _ = c.cacheDel(net.Network.Name, rt) + return nil +} + +// ValidateNetworkList checks that a configuration is reasonably valid. +// - all the specified plugins exist on disk +// - every plugin supports the desired version. +// +// Returns a list of all capabilities supported by the configuration, or error +func (c *CNIConfig) ValidateNetworkList(ctx context.Context, list *NetworkConfigList) ([]string, error) { + version := list.CNIVersion + + // holding map for seen caps (in case of duplicates) + caps := map[string]interface{}{} + + errs := []error{} + for _, net := range list.Plugins { + if err := c.validatePlugin(ctx, net.Network.Type, version); err != nil { + errs = append(errs, err) + } + for c, enabled := range net.Network.Capabilities { + if !enabled { + continue + } + caps[c] = struct{}{} + } + } + + if len(errs) > 0 { + return nil, fmt.Errorf("%v", errs) + } + + // make caps list + cc := make([]string, 0, len(caps)) + for c := range caps { + cc = append(cc, c) + } + + return cc, nil +} + +// ValidateNetwork checks that a configuration is reasonably valid. +// It uses the same logic as ValidateNetworkList) +// Returns a list of capabilities +func (c *CNIConfig) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) { + caps := []string{} + for c, ok := range net.Network.Capabilities { + if ok { + caps = append(caps, c) + } + } + if err := c.validatePlugin(ctx, net.Network.Type, net.Network.CNIVersion); err != nil { + return nil, err + } + return caps, nil +} + +// validatePlugin checks that an individual plugin's configuration is sane +func (c *CNIConfig) validatePlugin(ctx context.Context, pluginName, expectedVersion string) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(pluginName, c.Path) + if err != nil { + return err + } + if expectedVersion == "" { + expectedVersion = "0.1.0" + } + + vi, err := invoke.GetVersionInfo(ctx, pluginPath, c.exec) + if err != nil { + return err + } + for _, vers := range vi.SupportedVersions() { + if vers == expectedVersion { + return nil + } + } + return fmt.Errorf("plugin %s does not support config version %q", pluginName, expectedVersion) +} + +// GetVersionInfo reports which versions of the CNI spec are supported by +// the given plugin. +func (c *CNIConfig) GetVersionInfo(ctx context.Context, pluginType string) (version.PluginInfo, error) { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(pluginType, c.Path) + if err != nil { + return nil, err + } + + return invoke.GetVersionInfo(ctx, pluginPath, c.exec) +} + +// ===== +func (c *CNIConfig) args(action string, rt *RuntimeConf) *invoke.Args { + return &invoke.Args{ + Command: action, + ContainerID: rt.ContainerID, + NetNS: rt.NetNS, + PluginArgs: rt.Args, + IfName: rt.IfName, + Path: strings.Join(c.Path, string(os.PathListSeparator)), + } +} diff --git a/ecs-agent/vendor/github.com/containernetworking/cni/libcni/conf.go b/ecs-agent/vendor/github.com/containernetworking/cni/libcni/conf.go new file mode 100644 index 00000000000..3cd6a59d1c0 --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/cni/libcni/conf.go @@ -0,0 +1,270 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package libcni + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + + "github.com/containernetworking/cni/pkg/types" +) + +type NotFoundError struct { + Dir string + Name string +} + +func (e NotFoundError) Error() string { + return fmt.Sprintf(`no net configuration with name "%s" in %s`, e.Name, e.Dir) +} + +type NoConfigsFoundError struct { + Dir string +} + +func (e NoConfigsFoundError) Error() string { + return fmt.Sprintf(`no net configurations found in %s`, e.Dir) +} + +func ConfFromBytes(bytes []byte) (*NetworkConfig, error) { + conf := &NetworkConfig{Bytes: bytes, Network: &types.NetConf{}} + if err := json.Unmarshal(bytes, conf.Network); err != nil { + return nil, fmt.Errorf("error parsing configuration: %w", err) + } + if conf.Network.Type == "" { + return nil, fmt.Errorf("error parsing configuration: missing 'type'") + } + return conf, nil +} + +func ConfFromFile(filename string) (*NetworkConfig, error) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("error reading %s: %w", filename, err) + } + return ConfFromBytes(bytes) +} + +func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) { + rawList := make(map[string]interface{}) + if err := json.Unmarshal(bytes, &rawList); err != nil { + return nil, fmt.Errorf("error parsing configuration list: %w", err) + } + + rawName, ok := rawList["name"] + if !ok { + return nil, fmt.Errorf("error parsing configuration list: no name") + } + name, ok := rawName.(string) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid name type %T", rawName) + } + + var cniVersion string + rawVersion, ok := rawList["cniVersion"] + if ok { + cniVersion, ok = rawVersion.(string) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid cniVersion type %T", rawVersion) + } + } + + disableCheck := false + if rawDisableCheck, ok := rawList["disableCheck"]; ok { + disableCheck, ok = rawDisableCheck.(bool) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid disableCheck type %T", rawDisableCheck) + } + } + + list := &NetworkConfigList{ + Name: name, + DisableCheck: disableCheck, + CNIVersion: cniVersion, + Bytes: bytes, + } + + var plugins []interface{} + plug, ok := rawList["plugins"] + if !ok { + return nil, fmt.Errorf("error parsing configuration list: no 'plugins' key") + } + plugins, ok = plug.([]interface{}) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid 'plugins' type %T", plug) + } + if len(plugins) == 0 { + return nil, fmt.Errorf("error parsing configuration list: no plugins in list") + } + + for i, conf := range plugins { + newBytes, err := json.Marshal(conf) + if err != nil { + return nil, fmt.Errorf("failed to marshal plugin config %d: %w", i, err) + } + netConf, err := ConfFromBytes(newBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse plugin config %d: %w", i, err) + } + list.Plugins = append(list.Plugins, netConf) + } + + return list, nil +} + +func ConfListFromFile(filename string) (*NetworkConfigList, error) { + bytes, err := 
ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("error reading %s: %w", filename, err) + } + return ConfListFromBytes(bytes) +} + +func ConfFiles(dir string, extensions []string) ([]string, error) { + // In part, adapted from rkt/networking/podenv.go#listFiles + files, err := ioutil.ReadDir(dir) + switch { + case err == nil: // break + case os.IsNotExist(err): + return nil, nil + default: + return nil, err + } + + confFiles := []string{} + for _, f := range files { + if f.IsDir() { + continue + } + fileExt := filepath.Ext(f.Name()) + for _, ext := range extensions { + if fileExt == ext { + confFiles = append(confFiles, filepath.Join(dir, f.Name())) + } + } + } + return confFiles, nil +} + +func LoadConf(dir, name string) (*NetworkConfig, error) { + files, err := ConfFiles(dir, []string{".conf", ".json"}) + switch { + case err != nil: + return nil, err + case len(files) == 0: + return nil, NoConfigsFoundError{Dir: dir} + } + sort.Strings(files) + + for _, confFile := range files { + conf, err := ConfFromFile(confFile) + if err != nil { + return nil, err + } + if conf.Network.Name == name { + return conf, nil + } + } + return nil, NotFoundError{dir, name} +} + +func LoadConfList(dir, name string) (*NetworkConfigList, error) { + files, err := ConfFiles(dir, []string{".conflist"}) + if err != nil { + return nil, err + } + sort.Strings(files) + + for _, confFile := range files { + conf, err := ConfListFromFile(confFile) + if err != nil { + return nil, err + } + if conf.Name == name { + return conf, nil + } + } + + // Try and load a network configuration file (instead of list) + // from the same name, then upconvert. + singleConf, err := LoadConf(dir, name) + if err != nil { + // A little extra logic so the error makes sense + if _, ok := err.(NoConfigsFoundError); len(files) != 0 && ok { + // Config lists found but no config files found + return nil, NotFoundError{dir, name} + } + + return nil, err + } + return ConfListFromConf(singleConf) +} + +func InjectConf(original *NetworkConfig, newValues map[string]interface{}) (*NetworkConfig, error) { + config := make(map[string]interface{}) + err := json.Unmarshal(original.Bytes, &config) + if err != nil { + return nil, fmt.Errorf("unmarshal existing network bytes: %w", err) + } + + for key, value := range newValues { + if key == "" { + return nil, fmt.Errorf("keys cannot be empty") + } + + if value == nil { + return nil, fmt.Errorf("key '%s' value must not be nil", key) + } + + config[key] = value + } + + newBytes, err := json.Marshal(config) + if err != nil { + return nil, err + } + + return ConfFromBytes(newBytes) +} + +// ConfListFromConf "upconverts" a network config in to a NetworkConfigList, +// with the single network as the only entry in the list. +func ConfListFromConf(original *NetworkConfig) (*NetworkConfigList, error) { + // Re-deserialize the config's json, then make a raw map configlist. + // This may seem a bit strange, but it's to make the Bytes fields + // actually make sense. Otherwise, the generated json is littered with + // golang default values. 
+ + rawConfig := make(map[string]interface{}) + if err := json.Unmarshal(original.Bytes, &rawConfig); err != nil { + return nil, err + } + + rawConfigList := map[string]interface{}{ + "name": original.Network.Name, + "cniVersion": original.Network.CNIVersion, + "plugins": []interface{}{rawConfig}, + } + + b, err := json.Marshal(rawConfigList) + if err != nil { + return nil, err + } + return ConfListFromBytes(b) +} diff --git a/ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/args.go b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/args.go new file mode 100644 index 00000000000..3cdb4bc8dad --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/args.go @@ -0,0 +1,128 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +import ( + "fmt" + "os" + "strings" +) + +type CNIArgs interface { + // For use with os/exec; i.e., return nil to inherit the + // environment from this process + // For use in delegation; inherit the environment from this + // process and allow overrides + AsEnv() []string +} + +type inherited struct{} + +var inheritArgsFromEnv inherited + +func (*inherited) AsEnv() []string { + return nil +} + +func ArgsFromEnv() CNIArgs { + return &inheritArgsFromEnv +} + +type Args struct { + Command string + ContainerID string + NetNS string + PluginArgs [][2]string + PluginArgsStr string + IfName string + Path string +} + +// Args implements the CNIArgs interface +var _ CNIArgs = &Args{} + +func (args *Args) AsEnv() []string { + env := os.Environ() + pluginArgsStr := args.PluginArgsStr + if pluginArgsStr == "" { + pluginArgsStr = stringify(args.PluginArgs) + } + + // Duplicated values which come first will be overridden, so we must put the + // custom values in the end to avoid being overridden by the process environments. + env = append(env, + "CNI_COMMAND="+args.Command, + "CNI_CONTAINERID="+args.ContainerID, + "CNI_NETNS="+args.NetNS, + "CNI_ARGS="+pluginArgsStr, + "CNI_IFNAME="+args.IfName, + "CNI_PATH="+args.Path, + ) + return dedupEnv(env) +} + +// taken from rkt/networking/net_plugin.go +func stringify(pluginArgs [][2]string) string { + entries := make([]string, len(pluginArgs)) + + for i, kv := range pluginArgs { + entries[i] = strings.Join(kv[:], "=") + } + + return strings.Join(entries, ";") +} + +// DelegateArgs implements the CNIArgs interface +// used for delegation to inherit from environments +// and allow some overrides like CNI_COMMAND +var _ CNIArgs = &DelegateArgs{} + +type DelegateArgs struct { + Command string +} + +func (d *DelegateArgs) AsEnv() []string { + env := os.Environ() + + // The custom values should come in the end to override the existing + // process environment of the same key. + env = append(env, + "CNI_COMMAND="+d.Command, + ) + return dedupEnv(env) +} + +// dedupEnv returns a copy of env with any duplicates removed, in favor of later values. +// Items not of the normal environment "key=value" form are preserved unchanged. 
+func dedupEnv(env []string) []string { + out := make([]string, 0, len(env)) + envMap := map[string]string{} + + for _, kv := range env { + // find the first "=" in environment, if not, just keep it + eq := strings.Index(kv, "=") + if eq < 0 { + out = append(out, kv) + continue + } + envMap[kv[:eq]] = kv[eq+1:] + } + + for k, v := range envMap { + out = append(out, fmt.Sprintf("%s=%s", k, v)) + } + + return out +} diff --git a/ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go new file mode 100644 index 00000000000..8defe4dd398 --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go @@ -0,0 +1,80 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +import ( + "context" + "os" + "path/filepath" + + "github.com/containernetworking/cni/pkg/types" +) + +func delegateCommon(delegatePlugin string, exec Exec) (string, Exec, error) { + if exec == nil { + exec = defaultExec + } + + paths := filepath.SplitList(os.Getenv("CNI_PATH")) + pluginPath, err := exec.FindInPath(delegatePlugin, paths) + if err != nil { + return "", nil, err + } + + return pluginPath, exec, nil +} + +// DelegateAdd calls the given delegate plugin with the CNI ADD action and +// JSON configuration +func DelegateAdd(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) { + pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) + if err != nil { + return nil, err + } + + // DelegateAdd will override the original "CNI_COMMAND" env from process with ADD + return ExecPluginWithResult(ctx, pluginPath, netconf, delegateArgs("ADD"), realExec) +} + +// DelegateCheck calls the given delegate plugin with the CNI CHECK action and +// JSON configuration +func DelegateCheck(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) + if err != nil { + return err + } + + // DelegateCheck will override the original CNI_COMMAND env from process with CHECK + return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("CHECK"), realExec) +} + +// DelegateDel calls the given delegate plugin with the CNI DEL action and +// JSON configuration +func DelegateDel(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) + if err != nil { + return err + } + + // DelegateDel will override the original CNI_COMMAND env from process with DEL + return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("DEL"), realExec) +} + +// return CNIArgs used by delegation +func delegateArgs(action string) *DelegateArgs { + return &DelegateArgs{ + Command: action, + } +} diff --git a/ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go new 
file mode 100644 index 00000000000..3ad07aa8f2d --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go @@ -0,0 +1,187 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +import ( + "context" + "encoding/json" + "fmt" + "os" + + "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/types/create" + "github.com/containernetworking/cni/pkg/version" +) + +// Exec is an interface encapsulates all operations that deal with finding +// and executing a CNI plugin. Tests may provide a fake implementation +// to avoid writing fake plugins to temporary directories during the test. +type Exec interface { + ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) + FindInPath(plugin string, paths []string) (string, error) + Decode(jsonBytes []byte) (version.PluginInfo, error) +} + +// Plugin must return result in same version as specified in netconf; but +// for backwards compatibility reasons if the result version is empty use +// config version (rather than technically correct 0.1.0). +// https://github.com/containernetworking/cni/issues/895 +func fixupResultVersion(netconf, result []byte) (string, []byte, error) { + versionDecoder := &version.ConfigDecoder{} + confVersion, err := versionDecoder.Decode(netconf) + if err != nil { + return "", nil, err + } + + var rawResult map[string]interface{} + if err := json.Unmarshal(result, &rawResult); err != nil { + return "", nil, fmt.Errorf("failed to unmarshal raw result: %w", err) + } + + // plugin output of "null" is successfully unmarshalled, but results in a nil + // map which causes a panic when the confVersion is assigned below. + if rawResult == nil { + rawResult = make(map[string]interface{}) + } + + // Manually decode Result version; we need to know whether its cniVersion + // is empty, while built-in decoders (correctly) substitute 0.1.0 for an + // empty version per the CNI spec. 
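The version fix-up above reduces to a simple rule: if the plugin's result JSON carries no (or an empty) cniVersion, stamp it with the version taken from the network config before decoding. The sketch below re-implements that rule for illustration only, since the helper itself is unexported; the version string and payload are made up.

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// stampVersion mirrors the fallback described above: keep a non-empty
// cniVersion from the result, otherwise copy it from the config.
func stampVersion(confVersion string, result []byte) ([]byte, error) {
	raw := map[string]interface{}{}
	if err := json.Unmarshal(result, &raw); err != nil {
		return nil, err
	}
	if v, ok := raw["cniVersion"].(string); !ok || v == "" {
		raw["cniVersion"] = confVersion
	}
	return json.Marshal(raw)
}

func main() {
	// The result omitted its version, so it inherits the config's 0.4.0.
	out, err := stampVersion("0.4.0", []byte(`{"ips":[]}`))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out)) // {"cniVersion":"0.4.0","ips":[]}
}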
+ if resultVerRaw, ok := rawResult["cniVersion"]; ok { + resultVer, ok := resultVerRaw.(string) + if ok && resultVer != "" { + return resultVer, result, nil + } + } + + // If the cniVersion is not present or empty, assume the result is + // the same CNI spec version as the config + rawResult["cniVersion"] = confVersion + newBytes, err := json.Marshal(rawResult) + if err != nil { + return "", nil, fmt.Errorf("failed to remarshal fixed result: %w", err) + } + + return confVersion, newBytes, nil +} + +// For example, a testcase could pass an instance of the following fakeExec +// object to ExecPluginWithResult() to verify the incoming stdin and environment +// and provide a tailored response: +// +//import ( +// "encoding/json" +// "path" +// "strings" +//) +// +//type fakeExec struct { +// version.PluginDecoder +//} +// +//func (f *fakeExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) { +// net := &types.NetConf{} +// err := json.Unmarshal(stdinData, net) +// if err != nil { +// return nil, fmt.Errorf("failed to unmarshal configuration: %v", err) +// } +// pluginName := path.Base(pluginPath) +// if pluginName != net.Type { +// return nil, fmt.Errorf("plugin name %q did not match config type %q", pluginName, net.Type) +// } +// for _, e := range environ { +// // Check environment for forced failure request +// parts := strings.Split(e, "=") +// if len(parts) > 0 && parts[0] == "FAIL" { +// return nil, fmt.Errorf("failed to execute plugin %s", pluginName) +// } +// } +// return []byte("{\"CNIVersion\":\"0.4.0\"}"), nil +//} +// +//func (f *fakeExec) FindInPath(plugin string, paths []string) (string, error) { +// if len(paths) > 0 { +// return path.Join(paths[0], plugin), nil +// } +// return "", fmt.Errorf("failed to find plugin %s in paths %v", plugin, paths) +//} + +func ExecPluginWithResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) (types.Result, error) { + if exec == nil { + exec = defaultExec + } + + stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv()) + if err != nil { + return nil, err + } + + resultVersion, fixedBytes, err := fixupResultVersion(netconf, stdoutBytes) + if err != nil { + return nil, err + } + + return create.Create(resultVersion, fixedBytes) +} + +func ExecPluginWithoutResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) error { + if exec == nil { + exec = defaultExec + } + _, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv()) + return err +} + +// GetVersionInfo returns the version information available about the plugin. +// For recent-enough plugins, it uses the information returned by the VERSION +// command. 
For older plugins which do not recognize that command, it reports +// version 0.1.0 +func GetVersionInfo(ctx context.Context, pluginPath string, exec Exec) (version.PluginInfo, error) { + if exec == nil { + exec = defaultExec + } + args := &Args{ + Command: "VERSION", + + // set fake values required by plugins built against an older version of skel + NetNS: "dummy", + IfName: "dummy", + Path: "dummy", + } + stdin := []byte(fmt.Sprintf(`{"cniVersion":%q}`, version.Current())) + stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, stdin, args.AsEnv()) + if err != nil { + if err.Error() == "unknown CNI_COMMAND: VERSION" { + return version.PluginSupports("0.1.0"), nil + } + return nil, err + } + + return exec.Decode(stdoutBytes) +} + +// DefaultExec is an object that implements the Exec interface which looks +// for and executes plugins from disk. +type DefaultExec struct { + *RawExec + version.PluginDecoder +} + +// DefaultExec implements the Exec interface +var _ Exec = &DefaultExec{} + +var defaultExec = &DefaultExec{ + RawExec: &RawExec{Stderr: os.Stderr}, +} diff --git a/ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/find.go b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/find.go new file mode 100644 index 00000000000..e62029eb788 --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/find.go @@ -0,0 +1,48 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +import ( + "fmt" + "os" + "path/filepath" + "strings" +) + +// FindInPath returns the full path of the plugin by searching in the provided path +func FindInPath(plugin string, paths []string) (string, error) { + if plugin == "" { + return "", fmt.Errorf("no plugin name provided") + } + + if strings.ContainsRune(plugin, os.PathSeparator) { + return "", fmt.Errorf("invalid plugin name: %s", plugin) + } + + if len(paths) == 0 { + return "", fmt.Errorf("no paths provided") + } + + for _, path := range paths { + for _, fe := range ExecutableFileExtensions { + fullpath := filepath.Join(path, plugin) + fe + if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() { + return fullpath, nil + } + } + } + + return "", fmt.Errorf("failed to find plugin %q in path %s", plugin, paths) +} diff --git a/ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go new file mode 100644 index 00000000000..9bcfb455367 --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go @@ -0,0 +1,20 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package invoke + +// Valid file extensions for plugin executables. +var ExecutableFileExtensions = []string{""} diff --git a/ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go new file mode 100644 index 00000000000..7665125b133 --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go @@ -0,0 +1,18 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +// Valid file extensions for plugin executables. +var ExecutableFileExtensions = []string{".exe", ""} diff --git a/ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go new file mode 100644 index 00000000000..5ab5cc88576 --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go @@ -0,0 +1,88 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
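FindInPath, defined just above in find.go, is what the delegate helpers use to resolve a plugin binary: it probes each CNI_PATH directory, trying the per-OS ExecutableFileExtensions shown here. A minimal sketch; the plugin name and CNI_PATH value are hypothetical:

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	"github.com/containernetworking/cni/pkg/invoke"
)

func main() {
	// Split CNI_PATH (e.g. "/opt/cni/bin") and probe each directory for an
	// executable named "bridge", the same way the invoke helpers do.
	paths := filepath.SplitList(os.Getenv("CNI_PATH"))
	pluginPath, err := invoke.FindInPath("bridge", paths)
	if err != nil {
		log.Fatalf("bridge plugin not found: %v", err)
	}
	fmt.Println("using plugin at", pluginPath)
}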
+ +package invoke + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "os/exec" + "strings" + "time" + + "github.com/containernetworking/cni/pkg/types" +) + +type RawExec struct { + Stderr io.Writer +} + +func (e *RawExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) { + stdout := &bytes.Buffer{} + stderr := &bytes.Buffer{} + c := exec.CommandContext(ctx, pluginPath) + c.Env = environ + c.Stdin = bytes.NewBuffer(stdinData) + c.Stdout = stdout + c.Stderr = stderr + + // Retry the command on "text file busy" errors + for i := 0; i <= 5; i++ { + err := c.Run() + + // Command succeeded + if err == nil { + break + } + + // If the plugin is currently about to be written, then we wait a + // second and try it again + if strings.Contains(err.Error(), "text file busy") { + time.Sleep(time.Second) + continue + } + + // All other errors except than the busy text file + return nil, e.pluginErr(err, stdout.Bytes(), stderr.Bytes()) + } + + // Copy stderr to caller's buffer in case plugin printed to both + // stdout and stderr for some reason. Ignore failures as stderr is + // only informational. + if e.Stderr != nil && stderr.Len() > 0 { + _, _ = stderr.WriteTo(e.Stderr) + } + return stdout.Bytes(), nil +} + +func (e *RawExec) pluginErr(err error, stdout, stderr []byte) error { + emsg := types.Error{} + if len(stdout) == 0 { + if len(stderr) == 0 { + emsg.Msg = fmt.Sprintf("netplugin failed with no error message: %v", err) + } else { + emsg.Msg = fmt.Sprintf("netplugin failed: %q", string(stderr)) + } + } else if perr := json.Unmarshal(stdout, &emsg); perr != nil { + emsg.Msg = fmt.Sprintf("netplugin failed but error parsing its diagnostic message %q: %v", string(stdout), perr) + } + return &emsg +} + +func (e *RawExec) FindInPath(plugin string, paths []string) (string, error) { + return FindInPath(plugin, paths) +} diff --git a/ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/020/types.go b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/020/types.go new file mode 100644 index 00000000000..99b151ff240 --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/020/types.go @@ -0,0 +1,189 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
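Putting the invoke package together, a delegate call ends up in the RawExec above: the plugin is resolved from CNI_PATH, run with the CNI_* environment, and its stdout decoded into a Result. A hedged sketch of DelegateAdd; the config, plugin name, and path are made up, and in real use the remaining CNI_* variables (CNI_CONTAINERID, CNI_NETNS, CNI_IFNAME) must already be set, since delegation only overrides CNI_COMMAND.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/containernetworking/cni/pkg/invoke"
)

func main() {
	// Hypothetical minimal network config; a real caller loads this from disk.
	netconf := []byte(`{"cniVersion":"0.4.0","name":"mynet","type":"bridge"}`)

	// DelegateAdd resolves the plugin binary from CNI_PATH.
	os.Setenv("CNI_PATH", "/opt/cni/bin")

	// A nil Exec selects the package default, which is backed by the RawExec
	// above (stderr forwarded, "text file busy" retried).
	result, err := invoke.DelegateAdd(context.Background(), "bridge", netconf, nil)
	if err != nil {
		log.Fatalf("ADD failed: %v", err)
	}
	fmt.Println("plugin returned a result of spec version", result.Version())
}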
+ +package types020 + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + + "github.com/containernetworking/cni/pkg/types" + convert "github.com/containernetworking/cni/pkg/types/internal" +) + +const ImplementedSpecVersion string = "0.2.0" + +var supportedVersions = []string{"", "0.1.0", ImplementedSpecVersion} + +// Register converters for all versions less than the implemented spec version +func init() { + convert.RegisterConverter("0.1.0", []string{ImplementedSpecVersion}, convertFrom010) + convert.RegisterConverter(ImplementedSpecVersion, []string{"0.1.0"}, convertTo010) + + // Creator + convert.RegisterCreator(supportedVersions, NewResult) +} + +// Compatibility types for CNI version 0.1.0 and 0.2.0 + +// NewResult creates a new Result object from JSON data. The JSON data +// must be compatible with the CNI versions implemented by this type. +func NewResult(data []byte) (types.Result, error) { + result := &Result{} + if err := json.Unmarshal(data, result); err != nil { + return nil, err + } + for _, v := range supportedVersions { + if result.CNIVersion == v { + if result.CNIVersion == "" { + result.CNIVersion = "0.1.0" + } + return result, nil + } + } + return nil, fmt.Errorf("result type supports %v but unmarshalled CNIVersion is %q", + supportedVersions, result.CNIVersion) +} + +// GetResult converts the given Result object to the ImplementedSpecVersion +// and returns the concrete type or an error +func GetResult(r types.Result) (*Result, error) { + result020, err := convert.Convert(r, ImplementedSpecVersion) + if err != nil { + return nil, err + } + result, ok := result020.(*Result) + if !ok { + return nil, fmt.Errorf("failed to convert result") + } + return result, nil +} + +func convertFrom010(from types.Result, toVersion string) (types.Result, error) { + if toVersion != "0.2.0" { + panic("only converts to version 0.2.0") + } + fromResult := from.(*Result) + return &Result{ + CNIVersion: ImplementedSpecVersion, + IP4: fromResult.IP4.Copy(), + IP6: fromResult.IP6.Copy(), + DNS: *fromResult.DNS.Copy(), + }, nil +} + +func convertTo010(from types.Result, toVersion string) (types.Result, error) { + if toVersion != "0.1.0" { + panic("only converts to version 0.1.0") + } + fromResult := from.(*Result) + return &Result{ + CNIVersion: "0.1.0", + IP4: fromResult.IP4.Copy(), + IP6: fromResult.IP6.Copy(), + DNS: *fromResult.DNS.Copy(), + }, nil +} + +// Result is what gets returned from the plugin (via stdout) to the caller +type Result struct { + CNIVersion string `json:"cniVersion,omitempty"` + IP4 *IPConfig `json:"ip4,omitempty"` + IP6 *IPConfig `json:"ip6,omitempty"` + DNS types.DNS `json:"dns,omitempty"` +} + +func (r *Result) Version() string { + return r.CNIVersion +} + +func (r *Result) GetAsVersion(version string) (types.Result, error) { + // If the creator of the result did not set the CNIVersion, assume it + // should be the highest spec version implemented by this Result + if r.CNIVersion == "" { + r.CNIVersion = ImplementedSpecVersion + } + return convert.Convert(r, version) +} + +func (r *Result) Print() error { + return r.PrintTo(os.Stdout) +} + +func (r *Result) PrintTo(writer io.Writer) error { + data, err := json.MarshalIndent(r, "", " ") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// IPConfig contains values necessary to configure an interface +type IPConfig struct { + IP net.IPNet + Gateway net.IP + Routes []types.Route +} + +func (i *IPConfig) Copy() *IPConfig { + if i == nil { + return nil + } + + var routes 
[]types.Route + for _, fromRoute := range i.Routes { + routes = append(routes, *fromRoute.Copy()) + } + return &IPConfig{ + IP: i.IP, + Gateway: i.Gateway, + Routes: routes, + } +} + +// net.IPNet is not JSON (un)marshallable so this duality is needed +// for our custom IPNet type + +// JSON (un)marshallable types +type ipConfig struct { + IP types.IPNet `json:"ip"` + Gateway net.IP `json:"gateway,omitempty"` + Routes []types.Route `json:"routes,omitempty"` +} + +func (c *IPConfig) MarshalJSON() ([]byte, error) { + ipc := ipConfig{ + IP: types.IPNet(c.IP), + Gateway: c.Gateway, + Routes: c.Routes, + } + + return json.Marshal(ipc) +} + +func (c *IPConfig) UnmarshalJSON(data []byte) error { + ipc := ipConfig{} + if err := json.Unmarshal(data, &ipc); err != nil { + return err + } + + c.IP = net.IPNet(ipc.IP) + c.Gateway = ipc.Gateway + c.Routes = ipc.Routes + return nil +} diff --git a/ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/040/types.go b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/040/types.go new file mode 100644 index 00000000000..3633b0eaa3a --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/040/types.go @@ -0,0 +1,306 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
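For the 0.2.0 types just defined, a short sketch of the two entry points: NewResult, which treats a missing cniVersion as 0.1.0, and GetResult, which converts any compatible types.Result into the concrete struct. The sample JSON is made up.

package main

import (
	"fmt"
	"log"

	types020 "github.com/containernetworking/cni/pkg/types/020"
)

func main() {
	// A legacy result with no cniVersion is treated as 0.1.0 by NewResult.
	data := []byte(`{"ip4":{"ip":"10.1.2.3/24"}}`)

	res, err := types020.NewResult(data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("decoded spec version:", res.Version()) // 0.1.0

	// GetResult converts the result to the implemented 0.2.0 version so the
	// IP4/IP6 fields can be inspected directly.
	old, err := types020.GetResult(res)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("ip4:", old.IP4.IP.String()) // 10.1.2.3/24
}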
+ +package types040 + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + + "github.com/containernetworking/cni/pkg/types" + types020 "github.com/containernetworking/cni/pkg/types/020" + convert "github.com/containernetworking/cni/pkg/types/internal" +) + +const ImplementedSpecVersion string = "0.4.0" + +var supportedVersions = []string{"0.3.0", "0.3.1", ImplementedSpecVersion} + +// Register converters for all versions less than the implemented spec version +func init() { + // Up-converters + convert.RegisterConverter("0.1.0", supportedVersions, convertFrom02x) + convert.RegisterConverter("0.2.0", supportedVersions, convertFrom02x) + convert.RegisterConverter("0.3.0", supportedVersions, convertInternal) + convert.RegisterConverter("0.3.1", supportedVersions, convertInternal) + + // Down-converters + convert.RegisterConverter("0.4.0", []string{"0.3.0", "0.3.1"}, convertInternal) + convert.RegisterConverter("0.4.0", []string{"0.1.0", "0.2.0"}, convertTo02x) + convert.RegisterConverter("0.3.1", []string{"0.1.0", "0.2.0"}, convertTo02x) + convert.RegisterConverter("0.3.0", []string{"0.1.0", "0.2.0"}, convertTo02x) + + // Creator + convert.RegisterCreator(supportedVersions, NewResult) +} + +func NewResult(data []byte) (types.Result, error) { + result := &Result{} + if err := json.Unmarshal(data, result); err != nil { + return nil, err + } + for _, v := range supportedVersions { + if result.CNIVersion == v { + return result, nil + } + } + return nil, fmt.Errorf("result type supports %v but unmarshalled CNIVersion is %q", + supportedVersions, result.CNIVersion) +} + +func GetResult(r types.Result) (*Result, error) { + resultCurrent, err := r.GetAsVersion(ImplementedSpecVersion) + if err != nil { + return nil, err + } + result, ok := resultCurrent.(*Result) + if !ok { + return nil, fmt.Errorf("failed to convert result") + } + return result, nil +} + +func NewResultFromResult(result types.Result) (*Result, error) { + newResult, err := convert.Convert(result, ImplementedSpecVersion) + if err != nil { + return nil, err + } + return newResult.(*Result), nil +} + +// Result is what gets returned from the plugin (via stdout) to the caller +type Result struct { + CNIVersion string `json:"cniVersion,omitempty"` + Interfaces []*Interface `json:"interfaces,omitempty"` + IPs []*IPConfig `json:"ips,omitempty"` + Routes []*types.Route `json:"routes,omitempty"` + DNS types.DNS `json:"dns,omitempty"` +} + +func convert020IPConfig(from *types020.IPConfig, ipVersion string) *IPConfig { + return &IPConfig{ + Version: ipVersion, + Address: from.IP, + Gateway: from.Gateway, + } +} + +func convertFrom02x(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*types020.Result) + toResult := &Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + Routes: []*types.Route{}, + } + if fromResult.IP4 != nil { + toResult.IPs = append(toResult.IPs, convert020IPConfig(fromResult.IP4, "4")) + for _, fromRoute := range fromResult.IP4.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + } + + if fromResult.IP6 != nil { + toResult.IPs = append(toResult.IPs, convert020IPConfig(fromResult.IP6, "6")) + for _, fromRoute := range fromResult.IP6.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + } + + return toResult, nil +} + +func convertInternal(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*Result) + toResult := &Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + Routes: 
[]*types.Route{}, + } + for _, fromIntf := range fromResult.Interfaces { + toResult.Interfaces = append(toResult.Interfaces, fromIntf.Copy()) + } + for _, fromIPC := range fromResult.IPs { + toResult.IPs = append(toResult.IPs, fromIPC.Copy()) + } + for _, fromRoute := range fromResult.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + return toResult, nil +} + +func convertTo02x(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*Result) + toResult := &types020.Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + } + + for _, fromIP := range fromResult.IPs { + // Only convert the first IP address of each version as 0.2.0 + // and earlier cannot handle multiple IP addresses + if fromIP.Version == "4" && toResult.IP4 == nil { + toResult.IP4 = &types020.IPConfig{ + IP: fromIP.Address, + Gateway: fromIP.Gateway, + } + } else if fromIP.Version == "6" && toResult.IP6 == nil { + toResult.IP6 = &types020.IPConfig{ + IP: fromIP.Address, + Gateway: fromIP.Gateway, + } + } + if toResult.IP4 != nil && toResult.IP6 != nil { + break + } + } + + for _, fromRoute := range fromResult.Routes { + is4 := fromRoute.Dst.IP.To4() != nil + if is4 && toResult.IP4 != nil { + toResult.IP4.Routes = append(toResult.IP4.Routes, types.Route{ + Dst: fromRoute.Dst, + GW: fromRoute.GW, + }) + } else if !is4 && toResult.IP6 != nil { + toResult.IP6.Routes = append(toResult.IP6.Routes, types.Route{ + Dst: fromRoute.Dst, + GW: fromRoute.GW, + }) + } + } + + // 0.2.0 and earlier require at least one IP address in the Result + if toResult.IP4 == nil && toResult.IP6 == nil { + return nil, fmt.Errorf("cannot convert: no valid IP addresses") + } + + return toResult, nil +} + +func (r *Result) Version() string { + return r.CNIVersion +} + +func (r *Result) GetAsVersion(version string) (types.Result, error) { + // If the creator of the result did not set the CNIVersion, assume it + // should be the highest spec version implemented by this Result + if r.CNIVersion == "" { + r.CNIVersion = ImplementedSpecVersion + } + return convert.Convert(r, version) +} + +func (r *Result) Print() error { + return r.PrintTo(os.Stdout) +} + +func (r *Result) PrintTo(writer io.Writer) error { + data, err := json.MarshalIndent(r, "", " ") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// Interface contains values about the created interfaces +type Interface struct { + Name string `json:"name"` + Mac string `json:"mac,omitempty"` + Sandbox string `json:"sandbox,omitempty"` +} + +func (i *Interface) String() string { + return fmt.Sprintf("%+v", *i) +} + +func (i *Interface) Copy() *Interface { + if i == nil { + return nil + } + newIntf := *i + return &newIntf +} + +// Int returns a pointer to the int value passed in. Used to +// set the IPConfig.Interface field. 
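A sketch of how a plugin might assemble and print a 0.4.0 Result with these types, including the Int helper defined right below for the IPConfig.Interface index. The interface name, addresses, and netns path are made up; types.ParseCIDR (from pkg/types, later in this patch) is used because it keeps the host address rather than the masked network.

package main

import (
	"log"
	"net"

	"github.com/containernetworking/cni/pkg/types"
	types040 "github.com/containernetworking/cni/pkg/types/040"
)

func main() {
	addr, err := types.ParseCIDR("10.1.2.3/24")
	if err != nil {
		log.Fatal(err)
	}

	res := &types040.Result{
		CNIVersion: "0.4.0",
		Interfaces: []*types040.Interface{{Name: "eth0", Sandbox: "/var/run/netns/example"}},
		IPs: []*types040.IPConfig{{
			Version:   "4",
			Interface: types040.Int(0), // index into Interfaces, via the Int helper
			Address:   *addr,
			Gateway:   net.ParseIP("10.1.2.1"),
		}},
		Routes: []*types.Route{},
	}

	// Print writes the result as indented JSON to stdout, which is exactly
	// what a plugin hands back to its caller.
	if err := res.Print(); err != nil {
		log.Fatal(err)
	}
}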
+func Int(v int) *int { + return &v +} + +// IPConfig contains values necessary to configure an IP address on an interface +type IPConfig struct { + // IP version, either "4" or "6" + Version string + // Index into Result structs Interfaces list + Interface *int + Address net.IPNet + Gateway net.IP +} + +func (i *IPConfig) String() string { + return fmt.Sprintf("%+v", *i) +} + +func (i *IPConfig) Copy() *IPConfig { + if i == nil { + return nil + } + + ipc := &IPConfig{ + Version: i.Version, + Address: i.Address, + Gateway: i.Gateway, + } + if i.Interface != nil { + intf := *i.Interface + ipc.Interface = &intf + } + return ipc +} + +// JSON (un)marshallable types +type ipConfig struct { + Version string `json:"version"` + Interface *int `json:"interface,omitempty"` + Address types.IPNet `json:"address"` + Gateway net.IP `json:"gateway,omitempty"` +} + +func (c *IPConfig) MarshalJSON() ([]byte, error) { + ipc := ipConfig{ + Version: c.Version, + Interface: c.Interface, + Address: types.IPNet(c.Address), + Gateway: c.Gateway, + } + + return json.Marshal(ipc) +} + +func (c *IPConfig) UnmarshalJSON(data []byte) error { + ipc := ipConfig{} + if err := json.Unmarshal(data, &ipc); err != nil { + return err + } + + c.Version = ipc.Version + c.Interface = ipc.Interface + c.Address = net.IPNet(ipc.Address) + c.Gateway = ipc.Gateway + return nil +} diff --git a/ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/100/types.go b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/100/types.go new file mode 100644 index 00000000000..0e1e8b857b7 --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/100/types.go @@ -0,0 +1,307 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package types100 + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + + "github.com/containernetworking/cni/pkg/types" + types040 "github.com/containernetworking/cni/pkg/types/040" + convert "github.com/containernetworking/cni/pkg/types/internal" +) + +const ImplementedSpecVersion string = "1.0.0" + +var supportedVersions = []string{ImplementedSpecVersion} + +// Register converters for all versions less than the implemented spec version +func init() { + // Up-converters + convert.RegisterConverter("0.1.0", supportedVersions, convertFrom02x) + convert.RegisterConverter("0.2.0", supportedVersions, convertFrom02x) + convert.RegisterConverter("0.3.0", supportedVersions, convertFrom04x) + convert.RegisterConverter("0.3.1", supportedVersions, convertFrom04x) + convert.RegisterConverter("0.4.0", supportedVersions, convertFrom04x) + + // Down-converters + convert.RegisterConverter("1.0.0", []string{"0.3.0", "0.3.1", "0.4.0"}, convertTo04x) + convert.RegisterConverter("1.0.0", []string{"0.1.0", "0.2.0"}, convertTo02x) + + // Creator + convert.RegisterCreator(supportedVersions, NewResult) +} + +func NewResult(data []byte) (types.Result, error) { + result := &Result{} + if err := json.Unmarshal(data, result); err != nil { + return nil, err + } + for _, v := range supportedVersions { + if result.CNIVersion == v { + return result, nil + } + } + return nil, fmt.Errorf("result type supports %v but unmarshalled CNIVersion is %q", + supportedVersions, result.CNIVersion) +} + +func GetResult(r types.Result) (*Result, error) { + resultCurrent, err := r.GetAsVersion(ImplementedSpecVersion) + if err != nil { + return nil, err + } + result, ok := resultCurrent.(*Result) + if !ok { + return nil, fmt.Errorf("failed to convert result") + } + return result, nil +} + +func NewResultFromResult(result types.Result) (*Result, error) { + newResult, err := convert.Convert(result, ImplementedSpecVersion) + if err != nil { + return nil, err + } + return newResult.(*Result), nil +} + +// Result is what gets returned from the plugin (via stdout) to the caller +type Result struct { + CNIVersion string `json:"cniVersion,omitempty"` + Interfaces []*Interface `json:"interfaces,omitempty"` + IPs []*IPConfig `json:"ips,omitempty"` + Routes []*types.Route `json:"routes,omitempty"` + DNS types.DNS `json:"dns,omitempty"` +} + +func convertFrom02x(from types.Result, toVersion string) (types.Result, error) { + result040, err := convert.Convert(from, "0.4.0") + if err != nil { + return nil, err + } + result100, err := convertFrom04x(result040, ImplementedSpecVersion) + if err != nil { + return nil, err + } + return result100, nil +} + +func convertIPConfigFrom040(from *types040.IPConfig) *IPConfig { + to := &IPConfig{ + Address: from.Address, + Gateway: from.Gateway, + } + if from.Interface != nil { + intf := *from.Interface + to.Interface = &intf + } + return to +} + +func convertInterfaceFrom040(from *types040.Interface) *Interface { + return &Interface{ + Name: from.Name, + Mac: from.Mac, + Sandbox: from.Sandbox, + } +} + +func convertFrom04x(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*types040.Result) + toResult := &Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + Routes: []*types.Route{}, + } + for _, fromIntf := range fromResult.Interfaces { + toResult.Interfaces = append(toResult.Interfaces, convertInterfaceFrom040(fromIntf)) + } + for _, fromIPC := range fromResult.IPs { + toResult.IPs = append(toResult.IPs, convertIPConfigFrom040(fromIPC)) + } + for _, 
fromRoute := range fromResult.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + return toResult, nil +} + +func convertIPConfigTo040(from *IPConfig) *types040.IPConfig { + version := "6" + if from.Address.IP.To4() != nil { + version = "4" + } + to := &types040.IPConfig{ + Version: version, + Address: from.Address, + Gateway: from.Gateway, + } + if from.Interface != nil { + intf := *from.Interface + to.Interface = &intf + } + return to +} + +func convertInterfaceTo040(from *Interface) *types040.Interface { + return &types040.Interface{ + Name: from.Name, + Mac: from.Mac, + Sandbox: from.Sandbox, + } +} + +func convertTo04x(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*Result) + toResult := &types040.Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + Routes: []*types.Route{}, + } + for _, fromIntf := range fromResult.Interfaces { + toResult.Interfaces = append(toResult.Interfaces, convertInterfaceTo040(fromIntf)) + } + for _, fromIPC := range fromResult.IPs { + toResult.IPs = append(toResult.IPs, convertIPConfigTo040(fromIPC)) + } + for _, fromRoute := range fromResult.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + return toResult, nil +} + +func convertTo02x(from types.Result, toVersion string) (types.Result, error) { + // First convert to 0.4.0 + result040, err := convertTo04x(from, "0.4.0") + if err != nil { + return nil, err + } + result02x, err := convert.Convert(result040, toVersion) + if err != nil { + return nil, err + } + return result02x, nil +} + +func (r *Result) Version() string { + return r.CNIVersion +} + +func (r *Result) GetAsVersion(version string) (types.Result, error) { + // If the creator of the result did not set the CNIVersion, assume it + // should be the highest spec version implemented by this Result + if r.CNIVersion == "" { + r.CNIVersion = ImplementedSpecVersion + } + return convert.Convert(r, version) +} + +func (r *Result) Print() error { + return r.PrintTo(os.Stdout) +} + +func (r *Result) PrintTo(writer io.Writer) error { + data, err := json.MarshalIndent(r, "", " ") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// Interface contains values about the created interfaces +type Interface struct { + Name string `json:"name"` + Mac string `json:"mac,omitempty"` + Sandbox string `json:"sandbox,omitempty"` +} + +func (i *Interface) String() string { + return fmt.Sprintf("%+v", *i) +} + +func (i *Interface) Copy() *Interface { + if i == nil { + return nil + } + newIntf := *i + return &newIntf +} + +// Int returns a pointer to the int value passed in. Used to +// set the IPConfig.Interface field. 
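A sketch tying the 1.0.0 types to the converters registered above: decode a result of whatever version arrived on the wire, then up-convert it to the current struct. The 0.4.0 JSON payload is made up.

package main

import (
	"fmt"
	"log"

	types100 "github.com/containernetworking/cni/pkg/types/100"
	"github.com/containernetworking/cni/pkg/types/create"
)

func main() {
	// A 0.4.0 result as it might arrive from an older plugin.
	raw := []byte(`{
		"cniVersion": "0.4.0",
		"interfaces": [{"name": "eth0"}],
		"ips": [{"version": "4", "interface": 0, "address": "10.1.2.3/24"}]
	}`)

	// CreateFromBytes picks the concrete Result type from cniVersion.
	res, err := create.CreateFromBytes(raw)
	if err != nil {
		log.Fatal(err)
	}

	// NewResultFromResult up-converts it to 1.0.0 via the converters
	// registered in this package's init().
	cur, err := types100.NewResultFromResult(res)
	if err != nil {
		log.Fatal(err)
	}
	for _, ip := range cur.IPs {
		fmt.Println("ip:", ip.Address.String(), "interface index:", *ip.Interface)
	}
}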
+func Int(v int) *int { + return &v +} + +// IPConfig contains values necessary to configure an IP address on an interface +type IPConfig struct { + // Index into Result structs Interfaces list + Interface *int + Address net.IPNet + Gateway net.IP +} + +func (i *IPConfig) String() string { + return fmt.Sprintf("%+v", *i) +} + +func (i *IPConfig) Copy() *IPConfig { + if i == nil { + return nil + } + + ipc := &IPConfig{ + Address: i.Address, + Gateway: i.Gateway, + } + if i.Interface != nil { + intf := *i.Interface + ipc.Interface = &intf + } + return ipc +} + +// JSON (un)marshallable types +type ipConfig struct { + Interface *int `json:"interface,omitempty"` + Address types.IPNet `json:"address"` + Gateway net.IP `json:"gateway,omitempty"` +} + +func (c *IPConfig) MarshalJSON() ([]byte, error) { + ipc := ipConfig{ + Interface: c.Interface, + Address: types.IPNet(c.Address), + Gateway: c.Gateway, + } + + return json.Marshal(ipc) +} + +func (c *IPConfig) UnmarshalJSON(data []byte) error { + ipc := ipConfig{} + if err := json.Unmarshal(data, &ipc); err != nil { + return err + } + + c.Interface = ipc.Interface + c.Address = net.IPNet(ipc.Address) + c.Gateway = ipc.Gateway + return nil +} diff --git a/ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/args.go b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/args.go new file mode 100644 index 00000000000..7516f03ef58 --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/args.go @@ -0,0 +1,122 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "encoding" + "fmt" + "reflect" + "strings" +) + +// UnmarshallableBool typedef for builtin bool +// because builtin type's methods can't be declared +type UnmarshallableBool bool + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// Returns boolean true if the string is "1" or "[Tt]rue" +// Returns boolean false if the string is "0" or "[Ff]alse" +func (b *UnmarshallableBool) UnmarshalText(data []byte) error { + s := strings.ToLower(string(data)) + switch s { + case "1", "true": + *b = true + case "0", "false": + *b = false + default: + return fmt.Errorf("boolean unmarshal error: invalid input %s", s) + } + return nil +} + +// UnmarshallableString typedef for builtin string +type UnmarshallableString string + +// UnmarshalText implements the encoding.TextUnmarshaler interface. 
+// Returns the string +func (s *UnmarshallableString) UnmarshalText(data []byte) error { + *s = UnmarshallableString(data) + return nil +} + +// CommonArgs contains the IgnoreUnknown argument +// and must be embedded by all Arg structs +type CommonArgs struct { + IgnoreUnknown UnmarshallableBool `json:"ignoreunknown,omitempty"` +} + +// GetKeyField is a helper function to receive Values +// Values that represent a pointer to a struct +func GetKeyField(keyString string, v reflect.Value) reflect.Value { + return v.Elem().FieldByName(keyString) +} + +// UnmarshalableArgsError is used to indicate error unmarshalling args +// from the args-string in the form "K=V;K2=V2;..." +type UnmarshalableArgsError struct { + error +} + +// LoadArgs parses args from a string in the form "K=V;K2=V2;..." +func LoadArgs(args string, container interface{}) error { + if args == "" { + return nil + } + + containerValue := reflect.ValueOf(container) + + pairs := strings.Split(args, ";") + unknownArgs := []string{} + for _, pair := range pairs { + kv := strings.Split(pair, "=") + if len(kv) != 2 { + return fmt.Errorf("ARGS: invalid pair %q", pair) + } + keyString := kv[0] + valueString := kv[1] + keyField := GetKeyField(keyString, containerValue) + if !keyField.IsValid() { + unknownArgs = append(unknownArgs, pair) + continue + } + + var keyFieldInterface interface{} + switch { + case keyField.Kind() == reflect.Ptr: + keyField.Set(reflect.New(keyField.Type().Elem())) + keyFieldInterface = keyField.Interface() + case keyField.CanAddr() && keyField.Addr().CanInterface(): + keyFieldInterface = keyField.Addr().Interface() + default: + return UnmarshalableArgsError{fmt.Errorf("field '%s' has no valid interface", keyString)} + } + u, ok := keyFieldInterface.(encoding.TextUnmarshaler) + if !ok { + return UnmarshalableArgsError{fmt.Errorf( + "ARGS: cannot unmarshal into field '%s' - type '%s' does not implement encoding.TextUnmarshaler", + keyString, reflect.TypeOf(keyFieldInterface))} + } + err := u.UnmarshalText([]byte(valueString)) + if err != nil { + return fmt.Errorf("ARGS: error parsing value of pair %q: %w", pair, err) + } + } + + isIgnoreUnknown := GetKeyField("IgnoreUnknown", containerValue).Bool() + if len(unknownArgs) > 0 && !isIgnoreUnknown { + return fmt.Errorf("ARGS: unknown args %q", unknownArgs) + } + return nil +} diff --git a/ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/create/create.go b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/create/create.go new file mode 100644 index 00000000000..ed28b33e8e1 --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/create/create.go @@ -0,0 +1,56 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
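LoadArgs above parses the CNI_ARGS "K=V;K2=V2" string by reflection into an exported struct whose fields implement encoding.TextUnmarshaler. A sketch with a hypothetical plugin-specific args struct; the field names and values are made up, and embedding CommonArgs supplies IgnoreUnknown.

package main

import (
	"fmt"
	"log"

	"github.com/containernetworking/cni/pkg/types"
)

// podArgs is a hypothetical plugin-specific args struct. Fields must be
// exported and use the Unmarshallable* wrappers so LoadArgs can set them
// through encoding.TextUnmarshaler.
type podArgs struct {
	types.CommonArgs
	K8S_POD_NAME types.UnmarshallableString
	DEBUG        types.UnmarshallableBool
}

func main() {
	// The string below is what a runtime would pass via CNI_ARGS.
	args := podArgs{}
	if err := types.LoadArgs("IgnoreUnknown=true;K8S_POD_NAME=mypod;DEBUG=1;EXTRA=x", &args); err != nil {
		log.Fatal(err)
	}
	fmt.Println(args.K8S_POD_NAME, bool(args.DEBUG)) // mypod true (EXTRA is ignored)
}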
+ +package create + +import ( + "encoding/json" + "fmt" + + "github.com/containernetworking/cni/pkg/types" + convert "github.com/containernetworking/cni/pkg/types/internal" +) + +// DecodeVersion returns the CNI version from CNI configuration or result JSON, +// or an error if the operation could not be performed. +func DecodeVersion(jsonBytes []byte) (string, error) { + var conf struct { + CNIVersion string `json:"cniVersion"` + } + err := json.Unmarshal(jsonBytes, &conf) + if err != nil { + return "", fmt.Errorf("decoding version from network config: %w", err) + } + if conf.CNIVersion == "" { + return "0.1.0", nil + } + return conf.CNIVersion, nil +} + +// Create creates a CNI Result using the given JSON with the expected +// version, or an error if the creation could not be performed +func Create(version string, bytes []byte) (types.Result, error) { + return convert.Create(version, bytes) +} + +// CreateFromBytes creates a CNI Result from the given JSON, automatically +// detecting the CNI spec version of the result. An error is returned if the +// operation could not be performed. +func CreateFromBytes(bytes []byte) (types.Result, error) { + version, err := DecodeVersion(bytes) + if err != nil { + return nil, err + } + return convert.Create(version, bytes) +} diff --git a/ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go new file mode 100644 index 00000000000..bdbe4b0a59a --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go @@ -0,0 +1,92 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package convert + +import ( + "fmt" + + "github.com/containernetworking/cni/pkg/types" +) + +// ConvertFn should convert from the given arbitrary Result type into a +// Result implementing CNI specification version passed in toVersion. +// The function is guaranteed to be passed a Result type matching the +// fromVersion it was registered with, and is guaranteed to be +// passed a toVersion matching one of the toVersions it was registered with. 
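DecodeVersion above also encodes the spec's default: a config without cniVersion is treated as 0.1.0. A short demonstration; the config snippets are made up.

package main

import (
	"fmt"
	"log"

	"github.com/containernetworking/cni/pkg/types/create"
)

func main() {
	// cniVersion present: returned as-is.
	v, err := create.DecodeVersion([]byte(`{"cniVersion":"1.0.0","name":"mynet"}`))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v) // 1.0.0

	// cniVersion missing: treated as 0.1.0 per the CNI spec.
	v, err = create.DecodeVersion([]byte(`{"name":"oldnet","type":"bridge"}`))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v) // 0.1.0
}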
+type ConvertFn func(from types.Result, toVersion string) (types.Result, error) + +type converter struct { + // fromVersion is the CNI Result spec version that convertFn accepts + fromVersion string + // toVersions is a list of versions that convertFn can convert to + toVersions []string + convertFn ConvertFn +} + +var converters []*converter + +func findConverter(fromVersion, toVersion string) *converter { + for _, c := range converters { + if c.fromVersion == fromVersion { + for _, v := range c.toVersions { + if v == toVersion { + return c + } + } + } + } + return nil +} + +// Convert converts a CNI Result to the requested CNI specification version, +// or returns an error if the conversion could not be performed or failed +func Convert(from types.Result, toVersion string) (types.Result, error) { + if toVersion == "" { + toVersion = "0.1.0" + } + + fromVersion := from.Version() + + // Shortcut for same version + if fromVersion == toVersion { + return from, nil + } + + // Otherwise find the right converter + c := findConverter(fromVersion, toVersion) + if c == nil { + return nil, fmt.Errorf("no converter for CNI result version %s to %s", + fromVersion, toVersion) + } + return c.convertFn(from, toVersion) +} + +// RegisterConverter registers a CNI Result converter. SHOULD NOT BE CALLED +// EXCEPT FROM CNI ITSELF. +func RegisterConverter(fromVersion string, toVersions []string, convertFn ConvertFn) { + // Make sure there is no converter already registered for these + // from and to versions + for _, v := range toVersions { + if findConverter(fromVersion, v) != nil { + panic(fmt.Sprintf("converter already registered for %s to %s", + fromVersion, v)) + } + } + converters = append(converters, &converter{ + fromVersion: fromVersion, + toVersions: toVersions, + convertFn: convertFn, + }) +} diff --git a/ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/internal/create.go b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/internal/create.go new file mode 100644 index 00000000000..96363091259 --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/internal/create.go @@ -0,0 +1,66 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package convert + +import ( + "fmt" + + "github.com/containernetworking/cni/pkg/types" +) + +type ResultFactoryFunc func([]byte) (types.Result, error) + +type creator struct { + // CNI Result spec versions that createFn can create a Result for + versions []string + createFn ResultFactoryFunc +} + +var creators []*creator + +func findCreator(version string) *creator { + for _, c := range creators { + for _, v := range c.versions { + if v == version { + return c + } + } + } + return nil +} + +// Create creates a CNI Result using the given JSON, or an error if the creation +// could not be performed +func Create(version string, bytes []byte) (types.Result, error) { + if c := findCreator(version); c != nil { + return c.createFn(bytes) + } + return nil, fmt.Errorf("unsupported CNI result version %q", version) +} + +// RegisterCreator registers a CNI Result creator. SHOULD NOT BE CALLED +// EXCEPT FROM CNI ITSELF. +func RegisterCreator(versions []string, createFn ResultFactoryFunc) { + // Make sure there is no creator already registered for these versions + for _, v := range versions { + if findCreator(v) != nil { + panic(fmt.Sprintf("creator already registered for %s", v)) + } + } + creators = append(creators, &creator{ + versions: versions, + createFn: createFn, + }) +} diff --git a/ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/types.go b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/types.go new file mode 100644 index 00000000000..fba17dfc0f3 --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/types/types.go @@ -0,0 +1,234 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" +) + +// like net.IPNet but adds JSON marshalling and unmarshalling +type IPNet net.IPNet + +// ParseCIDR takes a string like "10.2.3.1/24" and +// return IPNet with "10.2.3.1" and /24 mask +func ParseCIDR(s string) (*net.IPNet, error) { + ip, ipn, err := net.ParseCIDR(s) + if err != nil { + return nil, err + } + + ipn.IP = ip + return ipn, nil +} + +func (n IPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(&n).String()) +} + +func (n *IPNet) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + + tmp, err := ParseCIDR(s) + if err != nil { + return err + } + + *n = IPNet(*tmp) + return nil +} + +// NetConf describes a network. +type NetConf struct { + CNIVersion string `json:"cniVersion,omitempty"` + + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + Capabilities map[string]bool `json:"capabilities,omitempty"` + IPAM IPAM `json:"ipam,omitempty"` + DNS DNS `json:"dns"` + + RawPrevResult map[string]interface{} `json:"prevResult,omitempty"` + PrevResult Result `json:"-"` +} + +type IPAM struct { + Type string `json:"type,omitempty"` +} + +// NetConfList describes an ordered list of networks. 
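The IPNet wrapper and ParseCIDR above exist so addresses can round-trip through JSON as CIDR strings while keeping the host part of the address. A small sketch:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/containernetworking/cni/pkg/types"
)

func main() {
	// Unlike net.ParseCIDR, types.ParseCIDR keeps the full address
	// ("10.2.3.1"), not just the masked network ("10.2.3.0").
	ipn, err := types.ParseCIDR("10.2.3.1/24")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ipn.String()) // 10.2.3.1/24

	// types.IPNet marshals back to the same CIDR string.
	b, err := json.Marshal(types.IPNet(*ipn))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // "10.2.3.1/24"
}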
+type NetConfList struct { + CNIVersion string `json:"cniVersion,omitempty"` + + Name string `json:"name,omitempty"` + DisableCheck bool `json:"disableCheck,omitempty"` + Plugins []*NetConf `json:"plugins,omitempty"` +} + +// Result is an interface that provides the result of plugin execution +type Result interface { + // The highest CNI specification result version the result supports + // without having to convert + Version() string + + // Returns the result converted into the requested CNI specification + // result version, or an error if conversion failed + GetAsVersion(version string) (Result, error) + + // Prints the result in JSON format to stdout + Print() error + + // Prints the result in JSON format to provided writer + PrintTo(writer io.Writer) error +} + +func PrintResult(result Result, version string) error { + newResult, err := result.GetAsVersion(version) + if err != nil { + return err + } + return newResult.Print() +} + +// DNS contains values interesting for DNS resolvers +type DNS struct { + Nameservers []string `json:"nameservers,omitempty"` + Domain string `json:"domain,omitempty"` + Search []string `json:"search,omitempty"` + Options []string `json:"options,omitempty"` +} + +func (d *DNS) Copy() *DNS { + if d == nil { + return nil + } + + to := &DNS{Domain: d.Domain} + for _, ns := range d.Nameservers { + to.Nameservers = append(to.Nameservers, ns) + } + for _, s := range d.Search { + to.Search = append(to.Search, s) + } + for _, o := range d.Options { + to.Options = append(to.Options, o) + } + return to +} + +type Route struct { + Dst net.IPNet + GW net.IP +} + +func (r *Route) String() string { + return fmt.Sprintf("%+v", *r) +} + +func (r *Route) Copy() *Route { + if r == nil { + return nil + } + + return &Route{ + Dst: r.Dst, + GW: r.GW, + } +} + +// Well known error codes +// see https://github.com/containernetworking/cni/blob/master/SPEC.md#well-known-error-codes +const ( + ErrUnknown uint = iota // 0 + ErrIncompatibleCNIVersion // 1 + ErrUnsupportedField // 2 + ErrUnknownContainer // 3 + ErrInvalidEnvironmentVariables // 4 + ErrIOFailure // 5 + ErrDecodingFailure // 6 + ErrInvalidNetworkConfig // 7 + ErrTryAgainLater uint = 11 + ErrInternal uint = 999 +) + +type Error struct { + Code uint `json:"code"` + Msg string `json:"msg"` + Details string `json:"details,omitempty"` +} + +func NewError(code uint, msg, details string) *Error { + return &Error{ + Code: code, + Msg: msg, + Details: details, + } +} + +func (e *Error) Error() string { + details := "" + if e.Details != "" { + details = fmt.Sprintf("; %v", e.Details) + } + return fmt.Sprintf("%v%v", e.Msg, details) +} + +func (e *Error) Print() error { + return prettyPrint(e) +} + +// net.IPNet is not JSON (un)marshallable so this duality is needed +// for our custom IPNet type + +// JSON (un)marshallable types +type route struct { + Dst IPNet `json:"dst"` + GW net.IP `json:"gw,omitempty"` +} + +func (r *Route) UnmarshalJSON(data []byte) error { + rt := route{} + if err := json.Unmarshal(data, &rt); err != nil { + return err + } + + r.Dst = net.IPNet(rt.Dst) + r.GW = rt.GW + return nil +} + +func (r Route) MarshalJSON() ([]byte, error) { + rt := route{ + Dst: IPNet(r.Dst), + GW: r.GW, + } + + return json.Marshal(rt) +} + +func prettyPrint(obj interface{}) error { + data, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return err + } + _, err = os.Stdout.Write(data) + return err +} diff --git a/ecs-agent/vendor/github.com/containernetworking/cni/pkg/utils/utils.go 
b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/utils/utils.go new file mode 100644 index 00000000000..b8ec3887459 --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/utils/utils.go @@ -0,0 +1,84 @@ +// Copyright 2019 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "bytes" + "fmt" + "regexp" + "unicode" + + "github.com/containernetworking/cni/pkg/types" +) + +const ( + // cniValidNameChars is the regexp used to validate valid characters in + // containerID and networkName + cniValidNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.\-]` + + // maxInterfaceNameLength is the length max of a valid interface name + maxInterfaceNameLength = 15 +) + +var cniReg = regexp.MustCompile(`^` + cniValidNameChars + `*$`) + +// ValidateContainerID will validate that the supplied containerID is not empty does not contain invalid characters +func ValidateContainerID(containerID string) *types.Error { + + if containerID == "" { + return types.NewError(types.ErrUnknownContainer, "missing containerID", "") + } + if !cniReg.MatchString(containerID) { + return types.NewError(types.ErrInvalidEnvironmentVariables, "invalid characters in containerID", containerID) + } + return nil +} + +// ValidateNetworkName will validate that the supplied networkName does not contain invalid characters +func ValidateNetworkName(networkName string) *types.Error { + + if networkName == "" { + return types.NewError(types.ErrInvalidNetworkConfig, "missing network name:", "") + } + if !cniReg.MatchString(networkName) { + return types.NewError(types.ErrInvalidNetworkConfig, "invalid characters found in network name", networkName) + } + return nil +} + +// ValidateInterfaceName will validate the interface name based on the three rules below +// 1. The name must not be empty +// 2. The name must be less than 16 characters +// 3. The name must not be "." or ".." +// 3. The name must not contain / or : or any whitespace characters +// ref to https://github.com/torvalds/linux/blob/master/net/core/dev.c#L1024 +func ValidateInterfaceName(ifName string) *types.Error { + if len(ifName) == 0 { + return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is empty", "") + } + if len(ifName) > maxInterfaceNameLength { + return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is too long", fmt.Sprintf("interface name should be less than %d characters", maxInterfaceNameLength+1)) + } + if ifName == "." || ifName == ".." { + return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is . 
or ..", "") + } + for _, r := range bytes.Runes([]byte(ifName)) { + if r == '/' || r == ':' || unicode.IsSpace(r) { + return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name contains / or : or whitespace characters", "") + } + } + + return nil +} diff --git a/ecs-agent/vendor/github.com/containernetworking/cni/pkg/version/conf.go b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/version/conf.go new file mode 100644 index 00000000000..808c33b8382 --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/version/conf.go @@ -0,0 +1,26 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "github.com/containernetworking/cni/pkg/types/create" +) + +// ConfigDecoder can decode the CNI version available in network config data +type ConfigDecoder struct{} + +func (*ConfigDecoder) Decode(jsonBytes []byte) (string, error) { + return create.DecodeVersion(jsonBytes) +} diff --git a/ecs-agent/vendor/github.com/containernetworking/cni/pkg/version/plugin.go b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/version/plugin.go new file mode 100644 index 00000000000..17b22b6b0c4 --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/version/plugin.go @@ -0,0 +1,144 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "encoding/json" + "fmt" + "io" + "strconv" + "strings" +) + +// PluginInfo reports information about CNI versioning +type PluginInfo interface { + // SupportedVersions returns one or more CNI spec versions that the plugin + // supports. 
If input is provided in one of these versions, then the plugin + // promises to use the same CNI version in its response + SupportedVersions() []string + + // Encode writes this CNI version information as JSON to the given Writer + Encode(io.Writer) error +} + +type pluginInfo struct { + CNIVersion_ string `json:"cniVersion"` + SupportedVersions_ []string `json:"supportedVersions,omitempty"` +} + +// pluginInfo implements the PluginInfo interface +var _ PluginInfo = &pluginInfo{} + +func (p *pluginInfo) Encode(w io.Writer) error { + return json.NewEncoder(w).Encode(p) +} + +func (p *pluginInfo) SupportedVersions() []string { + return p.SupportedVersions_ +} + +// PluginSupports returns a new PluginInfo that will report the given versions +// as supported +func PluginSupports(supportedVersions ...string) PluginInfo { + if len(supportedVersions) < 1 { + panic("programmer error: you must support at least one version") + } + return &pluginInfo{ + CNIVersion_: Current(), + SupportedVersions_: supportedVersions, + } +} + +// PluginDecoder can decode the response returned by a plugin's VERSION command +type PluginDecoder struct{} + +func (*PluginDecoder) Decode(jsonBytes []byte) (PluginInfo, error) { + var info pluginInfo + err := json.Unmarshal(jsonBytes, &info) + if err != nil { + return nil, fmt.Errorf("decoding version info: %w", err) + } + if info.CNIVersion_ == "" { + return nil, fmt.Errorf("decoding version info: missing field cniVersion") + } + if len(info.SupportedVersions_) == 0 { + if info.CNIVersion_ == "0.2.0" { + return PluginSupports("0.1.0", "0.2.0"), nil + } + return nil, fmt.Errorf("decoding version info: missing field supportedVersions") + } + return &info, nil +} + +// ParseVersion parses a version string like "3.0.1" or "0.4.5" into major, +// minor, and micro numbers or returns an error +func ParseVersion(version string) (int, int, int, error) { + var major, minor, micro int + if version == "" { // special case: no version declared == v0.1.0 + return 0, 1, 0, nil + } + + parts := strings.Split(version, ".") + if len(parts) >= 4 { + return -1, -1, -1, fmt.Errorf("invalid version %q: too many parts", version) + } + + major, err := strconv.Atoi(parts[0]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert major version part %q: %w", parts[0], err) + } + + if len(parts) >= 2 { + minor, err = strconv.Atoi(parts[1]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert minor version part %q: %w", parts[1], err) + } + } + + if len(parts) >= 3 { + micro, err = strconv.Atoi(parts[2]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert micro version part %q: %w", parts[2], err) + } + } + + return major, minor, micro, nil +} + +// GreaterThanOrEqualTo takes two string versions, parses them into major/minor/micro +// numbers, and compares them to determine whether the first version is greater +// than or equal to the second +func GreaterThanOrEqualTo(version, otherVersion string) (bool, error) { + firstMajor, firstMinor, firstMicro, err := ParseVersion(version) + if err != nil { + return false, err + } + + secondMajor, secondMinor, secondMicro, err := ParseVersion(otherVersion) + if err != nil { + return false, err + } + + if firstMajor > secondMajor { + return true, nil + } else if firstMajor == secondMajor { + if firstMinor > secondMinor { + return true, nil + } else if firstMinor == secondMinor && firstMicro >= secondMicro { + return true, nil + } + } + return false, nil +} diff --git 
a/ecs-agent/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go new file mode 100644 index 00000000000..25c3810b2aa --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go @@ -0,0 +1,49 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import "fmt" + +type ErrorIncompatible struct { + Config string + Supported []string +} + +func (e *ErrorIncompatible) Details() string { + return fmt.Sprintf("config is %q, plugin supports %q", e.Config, e.Supported) +} + +func (e *ErrorIncompatible) Error() string { + return fmt.Sprintf("incompatible CNI versions: %s", e.Details()) +} + +type Reconciler struct{} + +func (r *Reconciler) Check(configVersion string, pluginInfo PluginInfo) *ErrorIncompatible { + return r.CheckRaw(configVersion, pluginInfo.SupportedVersions()) +} + +func (*Reconciler) CheckRaw(configVersion string, supportedVersions []string) *ErrorIncompatible { + for _, supportedVersion := range supportedVersions { + if configVersion == supportedVersion { + return nil + } + } + + return &ErrorIncompatible{ + Config: configVersion, + Supported: supportedVersions, + } +} diff --git a/ecs-agent/vendor/github.com/containernetworking/cni/pkg/version/version.go b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/version/version.go new file mode 100644 index 00000000000..1326f8038e5 --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/cni/pkg/version/version.go @@ -0,0 +1,89 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "encoding/json" + "fmt" + + "github.com/containernetworking/cni/pkg/types" + types100 "github.com/containernetworking/cni/pkg/types/100" + "github.com/containernetworking/cni/pkg/types/create" +) + +// Current reports the version of the CNI spec implemented by this library +func Current() string { + return types100.ImplementedSpecVersion +} + +// Legacy PluginInfo describes a plugin that is backwards compatible with the +// CNI spec version 0.1.0. In particular, a runtime compiled against the 0.1.0 +// library ought to work correctly with a plugin that reports support for +// Legacy versions. +// +// Any future CNI spec versions which meet this definition should be added to +// this list. 
+var Legacy = PluginSupports("0.1.0", "0.2.0") +var All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0", "1.0.0") + +// VersionsFrom returns a list of versions starting from min, inclusive +func VersionsStartingFrom(min string) PluginInfo { + out := []string{} + // cheat, just assume ordered + ok := false + for _, v := range All.SupportedVersions() { + if !ok && v == min { + ok = true + } + if ok { + out = append(out, v) + } + } + return PluginSupports(out...) +} + +// Finds a Result object matching the requested version (if any) and asks +// that object to parse the plugin result, returning an error if parsing failed. +func NewResult(version string, resultBytes []byte) (types.Result, error) { + return create.Create(version, resultBytes) +} + +// ParsePrevResult parses a prevResult in a NetConf structure and sets +// the NetConf's PrevResult member to the parsed Result object. +func ParsePrevResult(conf *types.NetConf) error { + if conf.RawPrevResult == nil { + return nil + } + + // Prior to 1.0.0, Result types may not marshal a CNIVersion. Since the + // result version must match the config version, if the Result's version + // is empty, inject the config version. + if ver, ok := conf.RawPrevResult["CNIVersion"]; !ok || ver == "" { + conf.RawPrevResult["CNIVersion"] = conf.CNIVersion + } + + resultBytes, err := json.Marshal(conf.RawPrevResult) + if err != nil { + return fmt.Errorf("could not serialize prevResult: %w", err) + } + + conf.RawPrevResult = nil + conf.PrevResult, err = create.Create(conf.CNIVersion, resultBytes) + if err != nil { + return fmt.Errorf("could not parse prevResult: %w", err) + } + + return nil +} diff --git a/ecs-agent/vendor/github.com/containernetworking/plugins/LICENSE b/ecs-agent/vendor/github.com/containernetworking/plugins/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/plugins/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/ecs-agent/vendor/github.com/containernetworking/plugins/pkg/ns/README.md b/ecs-agent/vendor/github.com/containernetworking/plugins/pkg/ns/README.md new file mode 100644 index 00000000000..1e265c7a011 --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/plugins/pkg/ns/README.md @@ -0,0 +1,41 @@ +### Namespaces, Threads, and Go +On Linux each OS thread can have a different network namespace. Go's thread scheduling model switches goroutines between OS threads based on OS thread load and whether the goroutine would block other goroutines. This can result in a goroutine switching network namespaces without notice and lead to errors in your code. + +### Namespace Switching +Switching namespaces with the `ns.Set()` method is not recommended without additional strategies to prevent unexpected namespace changes when your goroutines switch OS threads. 
+ +Go provides the `runtime.LockOSThread()` function to ensure a specific goroutine executes on its current OS thread and prevents any other goroutine from running in that thread until the locked one exits. Careful usage of `LockOSThread()` and goroutines can provide good control over which network namespace a given goroutine executes in. + +For example, you cannot rely on the `ns.Set()` namespace being the current namespace after the `Set()` call unless you do two things. First, the goroutine calling `Set()` must have previously called `LockOSThread()`. Second, you must ensure `runtime.UnlockOSThread()` is not called somewhere in-between. You also cannot rely on the initial network namespace remaining the current network namespace if any other code in your program switches namespaces, unless you have already called `LockOSThread()` in that goroutine. Note that `LockOSThread()` prevents the Go scheduler from optimally scheduling goroutines for best performance, so `LockOSThread()` should only be used in small, isolated goroutines that release the lock quickly. + +### Do() The Recommended Thing +The `ns.Do()` method provides **partial** control over network namespaces for you by implementing these strategies. All code dependent on a particular network namespace (including the root namespace) should be wrapped in the `ns.Do()` method to ensure the correct namespace is selected for the duration of your code. For example: + +```go +err = targetNs.Do(func(hostNs ns.NetNS) error { + dummy := &netlink.Dummy{ + LinkAttrs: netlink.LinkAttrs{ + Name: "dummy0", + }, + } + return netlink.LinkAdd(dummy) +}) +``` + +Note this requirement to wrap every network call is very onerous - any libraries you call might call out to network services such as DNS, and all such calls need to be protected after you call `ns.Do()`. All goroutines spawned from within the `ns.Do` will not inherit the new namespace. The CNI plugins all exit very soon after calling `ns.Do()` which helps to minimize the problem. + +When a new thread is spawned in Linux, it inherits the namespace of its parent. In versions of go **prior to 1.10**, if the runtime spawns a new OS thread, it picks the parent randomly. If the chosen parent thread has been moved to a new namespace (even temporarily), the new OS thread will be permanently "stuck in the wrong namespace", and goroutines will non-deterministically switch namespaces as they are rescheduled. + +In short, **there was no safe way to change network namespaces, even temporarily, from within a long-lived, multithreaded Go process**. If you wish to do this, you must use go 1.10 or greater. + + +### Creating network namespaces +Earlier versions of this library managed namespace creation, but as CNI does not actually utilize this feature (and it was essentially unmaintained), it was removed. If you're writing a container runtime, you should implement namespace management yourself. However, there are some gotchas when doing so, especially around handling `/var/run/netns`. A reasonably correct reference implementation, borrowed from `rkt`, can be found in `pkg/testutils/netns_linux.go` if you're in need of a source of inspiration. 
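+
+### Example: running code in another namespace by path
+As a minimal sketch of the package's `WithNetNSPath` helper (which wraps `GetNS` and `Do`): the namespace path below is only a placeholder; substitute your container's netns path.
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/containernetworking/plugins/pkg/ns"
+)
+
+func main() {
+	// WithNetNSPath opens the namespace at the given path, runs the closure
+	// on a locked OS thread with that namespace set, and restores the original
+	// namespace afterwards (or lets the runtime discard the thread if the
+	// restore fails). The closure receives a handle to the original namespace.
+	err := ns.WithNetNSPath("/var/run/netns/example", func(hostNS ns.NetNS) error {
+		fmt.Printf("inside target netns; host netns path: %s\n", hostNS.Path())
+		return nil
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```
+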
+ + +### Further Reading + - https://github.com/golang/go/wiki/LockOSThread + - http://morsmachine.dk/go-scheduler + - https://github.com/containernetworking/cni/issues/262 + - https://golang.org/pkg/runtime/ + - https://www.weave.works/blog/linux-namespaces-and-go-don-t-mix diff --git a/ecs-agent/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go b/ecs-agent/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go new file mode 100644 index 00000000000..3b745d4913f --- /dev/null +++ b/ecs-agent/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go @@ -0,0 +1,234 @@ +// Copyright 2015-2017 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ns + +import ( + "fmt" + "os" + "runtime" + "sync" + "syscall" + + "golang.org/x/sys/unix" +) + +// Returns an object representing the current OS thread's network namespace +func GetCurrentNS() (NetNS, error) { + // Lock the thread in case other goroutine executes in it and changes its + // network namespace after getCurrentThreadNetNSPath(), otherwise it might + // return an unexpected network namespace. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + return GetNS(getCurrentThreadNetNSPath()) +} + +func getCurrentThreadNetNSPath() string { + // /proc/self/ns/net returns the namespace of the main thread, not + // of whatever thread this goroutine is running on. Make sure we + // use the thread's net namespace since the thread is switching around + return fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid()) +} + +func (ns *netNS) Close() error { + if err := ns.errorIfClosed(); err != nil { + return err + } + + if err := ns.file.Close(); err != nil { + return fmt.Errorf("Failed to close %q: %v", ns.file.Name(), err) + } + ns.closed = true + + return nil +} + +func (ns *netNS) Set() error { + if err := ns.errorIfClosed(); err != nil { + return err + } + + if err := unix.Setns(int(ns.Fd()), unix.CLONE_NEWNET); err != nil { + return fmt.Errorf("Error switching to ns %v: %v", ns.file.Name(), err) + } + + return nil +} + +type NetNS interface { + // Executes the passed closure in this object's network namespace, + // attempting to restore the original namespace before returning. + // However, since each OS thread can have a different network namespace, + // and Go's thread scheduling is highly variable, callers cannot + // guarantee any specific namespace is set unless operations that + // require that namespace are wrapped with Do(). Also, no code called + // from Do() should call runtime.UnlockOSThread(), or the risk + // of executing code in an incorrect namespace will be greater. See + // https://github.com/golang/go/wiki/LockOSThread for further details. + Do(toRun func(NetNS) error) error + + // Sets the current network namespace to this object's network namespace. 
+ // Note that since Go's thread scheduling is highly variable, callers + // cannot guarantee the requested namespace will be the current namespace + // after this function is called; to ensure this wrap operations that + // require the namespace with Do() instead. + Set() error + + // Returns the filesystem path representing this object's network namespace + Path() string + + // Returns a file descriptor representing this object's network namespace + Fd() uintptr + + // Cleans up this instance of the network namespace; if this instance + // is the last user the namespace will be destroyed + Close() error +} + +type netNS struct { + file *os.File + closed bool +} + +// netNS implements the NetNS interface +var _ NetNS = &netNS{} + +const ( + // https://github.com/torvalds/linux/blob/master/include/uapi/linux/magic.h + NSFS_MAGIC = 0x6e736673 + PROCFS_MAGIC = 0x9fa0 +) + +type NSPathNotExistErr struct{ msg string } + +func (e NSPathNotExistErr) Error() string { return e.msg } + +type NSPathNotNSErr struct{ msg string } + +func (e NSPathNotNSErr) Error() string { return e.msg } + +func IsNSorErr(nspath string) error { + stat := syscall.Statfs_t{} + if err := syscall.Statfs(nspath, &stat); err != nil { + if os.IsNotExist(err) { + err = NSPathNotExistErr{msg: fmt.Sprintf("failed to Statfs %q: %v", nspath, err)} + } else { + err = fmt.Errorf("failed to Statfs %q: %v", nspath, err) + } + return err + } + + switch stat.Type { + case PROCFS_MAGIC, NSFS_MAGIC: + return nil + default: + return NSPathNotNSErr{msg: fmt.Sprintf("unknown FS magic on %q: %x", nspath, stat.Type)} + } +} + +// Returns an object representing the namespace referred to by @path +func GetNS(nspath string) (NetNS, error) { + err := IsNSorErr(nspath) + if err != nil { + return nil, err + } + + fd, err := os.Open(nspath) + if err != nil { + return nil, err + } + + return &netNS{file: fd}, nil +} + +func (ns *netNS) Path() string { + return ns.file.Name() +} + +func (ns *netNS) Fd() uintptr { + return ns.file.Fd() +} + +func (ns *netNS) errorIfClosed() error { + if ns.closed { + return fmt.Errorf("%q has already been closed", ns.file.Name()) + } + return nil +} + +func (ns *netNS) Do(toRun func(NetNS) error) error { + if err := ns.errorIfClosed(); err != nil { + return err + } + + containedCall := func(hostNS NetNS) error { + threadNS, err := GetCurrentNS() + if err != nil { + return fmt.Errorf("failed to open current netns: %v", err) + } + defer threadNS.Close() + + // switch to target namespace + if err = ns.Set(); err != nil { + return fmt.Errorf("error switching to ns %v: %v", ns.file.Name(), err) + } + defer func() { + err := threadNS.Set() // switch back + if err == nil { + // Unlock the current thread only when we successfully switched back + // to the original namespace; otherwise leave the thread locked which + // will force the runtime to scrap the current thread, that is maybe + // not as optimal but at least always safe to do. + runtime.UnlockOSThread() + } + }() + + return toRun(hostNS) + } + + // save a handle to current network namespace + hostNS, err := GetCurrentNS() + if err != nil { + return fmt.Errorf("Failed to open current namespace: %v", err) + } + defer hostNS.Close() + + var wg sync.WaitGroup + wg.Add(1) + + // Start the callback in a new green thread so that if we later fail + // to switch the namespace back to the original one, we can safely + // leave the thread locked to die without a risk of the current thread + // left lingering with incorrect namespace. 
+ var innerError error + go func() { + defer wg.Done() + runtime.LockOSThread() + innerError = containedCall(hostNS) + }() + wg.Wait() + + return innerError +} + +// WithNetNSPath executes the passed closure under the given network +// namespace, restoring the original namespace afterwards. +func WithNetNSPath(nspath string, toRun func(NetNS) error) error { + ns, err := GetNS(nspath) + if err != nil { + return err + } + defer ns.Close() + return ns.Do(toRun) +} diff --git a/ecs-agent/vendor/github.com/gogo/protobuf/gogoproto/Makefile b/ecs-agent/vendor/github.com/gogo/protobuf/gogoproto/Makefile new file mode 100644 index 00000000000..0b4659b731e --- /dev/null +++ b/ecs-agent/vendor/github.com/gogo/protobuf/gogoproto/Makefile @@ -0,0 +1,37 @@ +# Protocol Buffers for Go with Gadgets +# +# Copyright (c) 2013, The GoGo Authors. All rights reserved. +# http://github.com/gogo/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto + +restore: + cp gogo.pb.golden gogo.pb.go + +preserve: + cp gogo.pb.go gogo.pb.golden diff --git a/ecs-agent/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/ecs-agent/vendor/github.com/gogo/protobuf/gogoproto/doc.go new file mode 100644 index 00000000000..081c86fa8ec --- /dev/null +++ b/ecs-agent/vendor/github.com/gogo/protobuf/gogoproto/doc.go @@ -0,0 +1,169 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package gogoproto provides extensions for protocol buffers to achieve: + + - fast marshalling and unmarshalling. + - peace of mind by optionally generating test and benchmark code. + - more canonical Go structures. + - less typing by optionally generating extra helper code. + - goprotobuf compatibility + +More Canonical Go Structures + +A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. +You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct. +Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions. + + - nullable, if false, a field is generated without a pointer (see warning below). + - embed, if true, the field is generated as an embedded field. + - customtype, It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128 + - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. + - casttype (beta), Changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums. + - castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + - castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + +Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset. + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +for a quicker overview. 
+ +The following message: + + package test; + + import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + + message A { + optional string Description = 1 [(gogoproto.nullable) = false]; + optional int64 Number = 2 [(gogoproto.nullable) = false]; + optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false]; + } + +Will generate a go struct which looks a lot like this: + + type A struct { + Description string + Number int64 + Id github_com_gogo_protobuf_test_custom.Uuid + } + +You will see there are no pointers, since all fields are non-nullable. +You will also see a custom type which marshals to a string. +Be warned it is your responsibility to test your custom types thoroughly. +You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods. + +Next we will embed the message A in message B. + + message B { + optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true]; + repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false]; + } + +See below that A is embedded in B. + + type B struct { + A + G []github_com_gogo_protobuf_test_custom.Uint128 + } + +Also see the repeated custom type. + + type Uint128 [2]uint64 + +Next we will create a custom name for one of our fields. + + message C { + optional int64 size = 1 [(gogoproto.customname) = "MySize"]; + } + +See below that the field's name is MySize and not Size. + + type C struct { + MySize *int64 + } + +The is useful when having a protocol buffer message with a field name which conflicts with a generated method. +As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error. +Using customname you can fix this error without changing the field name. +This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were avialable. + +Gogoprotobuf also has some more subtle changes, these could be changed back: + + - the generated package name for imports do not have the extra /filename.pb, + but are actually the imports specified in the .proto file. + +Gogoprotobuf also has lost some features which should be brought back with time: + + - Marshalling and unmarshalling with reflect and without the unsafe package, + this requires work in pointer_reflect.go + +Why does nullable break protocol buffer specifications: + +The protocol buffer specification states, somewhere, that you should be able to tell whether a +field is set or unset. With the option nullable=false this feature is lost, +since your non-nullable fields will always be set. It can be seen as a layer on top of +protocol buffers, where before and after marshalling all non-nullable fields are set +and they cannot be unset. + +Goprotobuf Compatibility: + +Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers. +Gogoprotobuf generates the same code as goprotobuf if no extensions are used. +The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf: + + - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto. 
+ - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix + - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method, this is useful for rather using enum_stringer, or allowing you to write your own string method. + - goproto_getters, if false, the message is generated without get methods, this is useful when you would rather want to use face + - goproto_stringer, if false, the message is generated without the default string method, this is useful for rather using stringer, or allowing you to write your own string method. + - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension + - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields. + - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). + +Less Typing and Peace of Mind is explained in their specific plugin folders godoc: + + - github.com/gogo/protobuf/plugin/ + +If you do not use any of these extension the code that is generated +will be the same as if goprotobuf has generated it. + +The most complete way to see examples is to look at + + github.com/gogo/protobuf/test/thetest.proto + +Gogoprototest is a seperate project, +because we want to keep gogoprotobuf independent of goprotobuf, +but we still want to test it thoroughly. + +*/ +package gogoproto diff --git a/ecs-agent/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/ecs-agent/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go new file mode 100644 index 00000000000..1e91766aeea --- /dev/null +++ b/ecs-agent/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go @@ -0,0 +1,874 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: gogo.proto + +package gogoproto + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62001, + Name: "gogoproto.goproto_enum_prefix", + Tag: "varint,62001,opt,name=goproto_enum_prefix", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62021, + Name: "gogoproto.goproto_enum_stringer", + Tag: "varint,62021,opt,name=goproto_enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62022, + Name: "gogoproto.enum_stringer", + Tag: "varint,62022,opt,name=enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*string)(nil), + Field: 62023, + Name: "gogoproto.enum_customname", + Tag: "bytes,62023,opt,name=enum_customname", + Filename: "gogo.proto", +} + +var E_Enumdecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62024, + Name: "gogoproto.enumdecl", + Tag: "varint,62024,opt,name=enumdecl", + Filename: "gogo.proto", +} + +var E_EnumvalueCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumValueOptions)(nil), + ExtensionType: (*string)(nil), + Field: 66001, + Name: "gogoproto.enumvalue_customname", + Tag: "bytes,66001,opt,name=enumvalue_customname", + Filename: "gogo.proto", +} + +var E_GoprotoGettersAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63001, + Name: "gogoproto.goproto_getters_all", + Tag: "varint,63001,opt,name=goproto_getters_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63002, + Name: "gogoproto.goproto_enum_prefix_all", + Tag: "varint,63002,opt,name=goproto_enum_prefix_all", + Filename: "gogo.proto", +} + +var E_GoprotoStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63003, + Name: "gogoproto.goproto_stringer_all", + Tag: "varint,63003,opt,name=goproto_stringer_all", + Filename: "gogo.proto", +} + +var E_VerboseEqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63004, + Name: "gogoproto.verbose_equal_all", + Tag: "varint,63004,opt,name=verbose_equal_all", + Filename: "gogo.proto", +} + +var E_FaceAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63005, + Name: "gogoproto.face_all", + Tag: "varint,63005,opt,name=face_all", + Filename: "gogo.proto", +} + +var E_GostringAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63006, + Name: "gogoproto.gostring_all", + Tag: "varint,63006,opt,name=gostring_all", + Filename: "gogo.proto", +} + +var E_PopulateAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63007, + Name: "gogoproto.populate_all", + Tag: "varint,63007,opt,name=populate_all", + Filename: "gogo.proto", +} + +var E_StringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + 
ExtensionType: (*bool)(nil), + Field: 63008, + Name: "gogoproto.stringer_all", + Tag: "varint,63008,opt,name=stringer_all", + Filename: "gogo.proto", +} + +var E_OnlyoneAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63009, + Name: "gogoproto.onlyone_all", + Tag: "varint,63009,opt,name=onlyone_all", + Filename: "gogo.proto", +} + +var E_EqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63013, + Name: "gogoproto.equal_all", + Tag: "varint,63013,opt,name=equal_all", + Filename: "gogo.proto", +} + +var E_DescriptionAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63014, + Name: "gogoproto.description_all", + Tag: "varint,63014,opt,name=description_all", + Filename: "gogo.proto", +} + +var E_TestgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63015, + Name: "gogoproto.testgen_all", + Tag: "varint,63015,opt,name=testgen_all", + Filename: "gogo.proto", +} + +var E_BenchgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63016, + Name: "gogoproto.benchgen_all", + Tag: "varint,63016,opt,name=benchgen_all", + Filename: "gogo.proto", +} + +var E_MarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63017, + Name: "gogoproto.marshaler_all", + Tag: "varint,63017,opt,name=marshaler_all", + Filename: "gogo.proto", +} + +var E_UnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63018, + Name: "gogoproto.unmarshaler_all", + Tag: "varint,63018,opt,name=unmarshaler_all", + Filename: "gogo.proto", +} + +var E_StableMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63019, + Name: "gogoproto.stable_marshaler_all", + Tag: "varint,63019,opt,name=stable_marshaler_all", + Filename: "gogo.proto", +} + +var E_SizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63020, + Name: "gogoproto.sizer_all", + Tag: "varint,63020,opt,name=sizer_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63021, + Name: "gogoproto.goproto_enum_stringer_all", + Tag: "varint,63021,opt,name=goproto_enum_stringer_all", + Filename: "gogo.proto", +} + +var E_EnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63022, + Name: "gogoproto.enum_stringer_all", + Tag: "varint,63022,opt,name=enum_stringer_all", + Filename: "gogo.proto", +} + +var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63023, + Name: "gogoproto.unsafe_marshaler_all", + Tag: "varint,63023,opt,name=unsafe_marshaler_all", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63024, + Name: "gogoproto.unsafe_unmarshaler_all", + Tag: "varint,63024,opt,name=unsafe_unmarshaler_all", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMapAll = 
&proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63025, + Name: "gogoproto.goproto_extensions_map_all", + Tag: "varint,63025,opt,name=goproto_extensions_map_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63026, + Name: "gogoproto.goproto_unrecognized_all", + Tag: "varint,63026,opt,name=goproto_unrecognized_all", + Filename: "gogo.proto", +} + +var E_GogoprotoImport = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63027, + Name: "gogoproto.gogoproto_import", + Tag: "varint,63027,opt,name=gogoproto_import", + Filename: "gogo.proto", +} + +var E_ProtosizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63028, + Name: "gogoproto.protosizer_all", + Tag: "varint,63028,opt,name=protosizer_all", + Filename: "gogo.proto", +} + +var E_CompareAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63029, + Name: "gogoproto.compare_all", + Tag: "varint,63029,opt,name=compare_all", + Filename: "gogo.proto", +} + +var E_TypedeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63030, + Name: "gogoproto.typedecl_all", + Tag: "varint,63030,opt,name=typedecl_all", + Filename: "gogo.proto", +} + +var E_EnumdeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63031, + Name: "gogoproto.enumdecl_all", + Tag: "varint,63031,opt,name=enumdecl_all", + Filename: "gogo.proto", +} + +var E_GoprotoRegistration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63032, + Name: "gogoproto.goproto_registration", + Tag: "varint,63032,opt,name=goproto_registration", + Filename: "gogo.proto", +} + +var E_MessagenameAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63033, + Name: "gogoproto.messagename_all", + Tag: "varint,63033,opt,name=messagename_all", + Filename: "gogo.proto", +} + +var E_GoprotoSizecacheAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63034, + Name: "gogoproto.goproto_sizecache_all", + Tag: "varint,63034,opt,name=goproto_sizecache_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63035, + Name: "gogoproto.goproto_unkeyed_all", + Tag: "varint,63035,opt,name=goproto_unkeyed_all", + Filename: "gogo.proto", +} + +var E_GoprotoGetters = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64001, + Name: "gogoproto.goproto_getters", + Tag: "varint,64001,opt,name=goproto_getters", + Filename: "gogo.proto", +} + +var E_GoprotoStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64003, + Name: "gogoproto.goproto_stringer", + Tag: "varint,64003,opt,name=goproto_stringer", + Filename: "gogo.proto", +} + +var E_VerboseEqual = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64004, + Name: 
"gogoproto.verbose_equal", + Tag: "varint,64004,opt,name=verbose_equal", + Filename: "gogo.proto", +} + +var E_Face = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64005, + Name: "gogoproto.face", + Tag: "varint,64005,opt,name=face", + Filename: "gogo.proto", +} + +var E_Gostring = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64006, + Name: "gogoproto.gostring", + Tag: "varint,64006,opt,name=gostring", + Filename: "gogo.proto", +} + +var E_Populate = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64007, + Name: "gogoproto.populate", + Tag: "varint,64007,opt,name=populate", + Filename: "gogo.proto", +} + +var E_Stringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 67008, + Name: "gogoproto.stringer", + Tag: "varint,67008,opt,name=stringer", + Filename: "gogo.proto", +} + +var E_Onlyone = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64009, + Name: "gogoproto.onlyone", + Tag: "varint,64009,opt,name=onlyone", + Filename: "gogo.proto", +} + +var E_Equal = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64013, + Name: "gogoproto.equal", + Tag: "varint,64013,opt,name=equal", + Filename: "gogo.proto", +} + +var E_Description = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64014, + Name: "gogoproto.description", + Tag: "varint,64014,opt,name=description", + Filename: "gogo.proto", +} + +var E_Testgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64015, + Name: "gogoproto.testgen", + Tag: "varint,64015,opt,name=testgen", + Filename: "gogo.proto", +} + +var E_Benchgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64016, + Name: "gogoproto.benchgen", + Tag: "varint,64016,opt,name=benchgen", + Filename: "gogo.proto", +} + +var E_Marshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64017, + Name: "gogoproto.marshaler", + Tag: "varint,64017,opt,name=marshaler", + Filename: "gogo.proto", +} + +var E_Unmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64018, + Name: "gogoproto.unmarshaler", + Tag: "varint,64018,opt,name=unmarshaler", + Filename: "gogo.proto", +} + +var E_StableMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64019, + Name: "gogoproto.stable_marshaler", + Tag: "varint,64019,opt,name=stable_marshaler", + Filename: "gogo.proto", +} + +var E_Sizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64020, + Name: "gogoproto.sizer", + Tag: "varint,64020,opt,name=sizer", + Filename: "gogo.proto", +} + +var E_UnsafeMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64023, + Name: "gogoproto.unsafe_marshaler", + Tag: "varint,64023,opt,name=unsafe_marshaler", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ + 
ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64024, + Name: "gogoproto.unsafe_unmarshaler", + Tag: "varint,64024,opt,name=unsafe_unmarshaler", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64025, + Name: "gogoproto.goproto_extensions_map", + Tag: "varint,64025,opt,name=goproto_extensions_map", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognized = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64026, + Name: "gogoproto.goproto_unrecognized", + Tag: "varint,64026,opt,name=goproto_unrecognized", + Filename: "gogo.proto", +} + +var E_Protosizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64028, + Name: "gogoproto.protosizer", + Tag: "varint,64028,opt,name=protosizer", + Filename: "gogo.proto", +} + +var E_Compare = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64029, + Name: "gogoproto.compare", + Tag: "varint,64029,opt,name=compare", + Filename: "gogo.proto", +} + +var E_Typedecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64030, + Name: "gogoproto.typedecl", + Tag: "varint,64030,opt,name=typedecl", + Filename: "gogo.proto", +} + +var E_Messagename = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64033, + Name: "gogoproto.messagename", + Tag: "varint,64033,opt,name=messagename", + Filename: "gogo.proto", +} + +var E_GoprotoSizecache = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64034, + Name: "gogoproto.goproto_sizecache", + Tag: "varint,64034,opt,name=goproto_sizecache", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64035, + Name: "gogoproto.goproto_unkeyed", + Tag: "varint,64035,opt,name=goproto_unkeyed", + Filename: "gogo.proto", +} + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65001, + Name: "gogoproto.nullable", + Tag: "varint,65001,opt,name=nullable", + Filename: "gogo.proto", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65002, + Name: "gogoproto.embed", + Tag: "varint,65002,opt,name=embed", + Filename: "gogo.proto", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65003, + Name: "gogoproto.customtype", + Tag: "bytes,65003,opt,name=customtype", + Filename: "gogo.proto", +} + +var E_Customname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65004, + Name: "gogoproto.customname", + Tag: "bytes,65004,opt,name=customname", + Filename: "gogo.proto", +} + +var E_Jsontag = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65005, + Name: "gogoproto.jsontag", + Tag: "bytes,65005,opt,name=jsontag", + Filename: "gogo.proto", +} + +var E_Moretags = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + 
ExtensionType: (*string)(nil), + Field: 65006, + Name: "gogoproto.moretags", + Tag: "bytes,65006,opt,name=moretags", + Filename: "gogo.proto", +} + +var E_Casttype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65007, + Name: "gogoproto.casttype", + Tag: "bytes,65007,opt,name=casttype", + Filename: "gogo.proto", +} + +var E_Castkey = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65008, + Name: "gogoproto.castkey", + Tag: "bytes,65008,opt,name=castkey", + Filename: "gogo.proto", +} + +var E_Castvalue = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65009, + Name: "gogoproto.castvalue", + Tag: "bytes,65009,opt,name=castvalue", + Filename: "gogo.proto", +} + +var E_Stdtime = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65010, + Name: "gogoproto.stdtime", + Tag: "varint,65010,opt,name=stdtime", + Filename: "gogo.proto", +} + +var E_Stdduration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65011, + Name: "gogoproto.stdduration", + Tag: "varint,65011,opt,name=stdduration", + Filename: "gogo.proto", +} + +var E_Wktpointer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65012, + Name: "gogoproto.wktpointer", + Tag: "varint,65012,opt,name=wktpointer", + Filename: "gogo.proto", +} + +func init() { + proto.RegisterExtension(E_GoprotoEnumPrefix) + proto.RegisterExtension(E_GoprotoEnumStringer) + proto.RegisterExtension(E_EnumStringer) + proto.RegisterExtension(E_EnumCustomname) + proto.RegisterExtension(E_Enumdecl) + proto.RegisterExtension(E_EnumvalueCustomname) + proto.RegisterExtension(E_GoprotoGettersAll) + proto.RegisterExtension(E_GoprotoEnumPrefixAll) + proto.RegisterExtension(E_GoprotoStringerAll) + proto.RegisterExtension(E_VerboseEqualAll) + proto.RegisterExtension(E_FaceAll) + proto.RegisterExtension(E_GostringAll) + proto.RegisterExtension(E_PopulateAll) + proto.RegisterExtension(E_StringerAll) + proto.RegisterExtension(E_OnlyoneAll) + proto.RegisterExtension(E_EqualAll) + proto.RegisterExtension(E_DescriptionAll) + proto.RegisterExtension(E_TestgenAll) + proto.RegisterExtension(E_BenchgenAll) + proto.RegisterExtension(E_MarshalerAll) + proto.RegisterExtension(E_UnmarshalerAll) + proto.RegisterExtension(E_StableMarshalerAll) + proto.RegisterExtension(E_SizerAll) + proto.RegisterExtension(E_GoprotoEnumStringerAll) + proto.RegisterExtension(E_EnumStringerAll) + proto.RegisterExtension(E_UnsafeMarshalerAll) + proto.RegisterExtension(E_UnsafeUnmarshalerAll) + proto.RegisterExtension(E_GoprotoExtensionsMapAll) + proto.RegisterExtension(E_GoprotoUnrecognizedAll) + proto.RegisterExtension(E_GogoprotoImport) + proto.RegisterExtension(E_ProtosizerAll) + proto.RegisterExtension(E_CompareAll) + proto.RegisterExtension(E_TypedeclAll) + proto.RegisterExtension(E_EnumdeclAll) + proto.RegisterExtension(E_GoprotoRegistration) + proto.RegisterExtension(E_MessagenameAll) + proto.RegisterExtension(E_GoprotoSizecacheAll) + proto.RegisterExtension(E_GoprotoUnkeyedAll) + proto.RegisterExtension(E_GoprotoGetters) + proto.RegisterExtension(E_GoprotoStringer) + proto.RegisterExtension(E_VerboseEqual) + proto.RegisterExtension(E_Face) + proto.RegisterExtension(E_Gostring) + proto.RegisterExtension(E_Populate) + 
proto.RegisterExtension(E_Stringer) + proto.RegisterExtension(E_Onlyone) + proto.RegisterExtension(E_Equal) + proto.RegisterExtension(E_Description) + proto.RegisterExtension(E_Testgen) + proto.RegisterExtension(E_Benchgen) + proto.RegisterExtension(E_Marshaler) + proto.RegisterExtension(E_Unmarshaler) + proto.RegisterExtension(E_StableMarshaler) + proto.RegisterExtension(E_Sizer) + proto.RegisterExtension(E_UnsafeMarshaler) + proto.RegisterExtension(E_UnsafeUnmarshaler) + proto.RegisterExtension(E_GoprotoExtensionsMap) + proto.RegisterExtension(E_GoprotoUnrecognized) + proto.RegisterExtension(E_Protosizer) + proto.RegisterExtension(E_Compare) + proto.RegisterExtension(E_Typedecl) + proto.RegisterExtension(E_Messagename) + proto.RegisterExtension(E_GoprotoSizecache) + proto.RegisterExtension(E_GoprotoUnkeyed) + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) + proto.RegisterExtension(E_Customname) + proto.RegisterExtension(E_Jsontag) + proto.RegisterExtension(E_Moretags) + proto.RegisterExtension(E_Casttype) + proto.RegisterExtension(E_Castkey) + proto.RegisterExtension(E_Castvalue) + proto.RegisterExtension(E_Stdtime) + proto.RegisterExtension(E_Stdduration) + proto.RegisterExtension(E_Wktpointer) +} + +func init() { proto.RegisterFile("gogo.proto", fileDescriptor_592445b5231bc2b9) } + +var fileDescriptor_592445b5231bc2b9 = []byte{ + // 1328 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45, + 0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9, + 0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18, + 0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84, + 0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f, + 0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 0x54, 0x35, 0xb7, + 0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6, + 0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9, + 0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6, + 0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59, + 0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc, + 0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 0xb7, 0x94, 0x83, 0x7c, 0x99, + 0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19, + 0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b, + 0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79, + 0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8, + 0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d, + 0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4, + 0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78, + 0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0, + 0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1, + 0x7c, 0x19, 0x9d, 0x75, 
0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6, + 0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae, + 0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c, + 0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0, + 0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b, + 0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04, + 0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28, + 0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36, + 0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50, + 0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d, + 0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa, + 0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5, + 0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b, + 0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24, + 0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05, + 0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2, + 0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b, + 0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92, + 0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56, + 0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e, + 0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19, + 0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70, + 0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0, + 0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c, + 0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a, + 0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0, + 0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4, + 0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95, + 0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9, + 0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9, + 0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f, + 0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9, + 0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5, + 0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8, + 0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb, + 0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae, + 0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 
0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31, + 0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d, + 0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30, + 0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94, + 0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f, + 0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36, + 0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e, + 0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b, + 0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e, + 0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb, + 0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5, + 0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17, + 0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45, + 0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32, + 0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4, + 0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8, + 0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f, + 0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49, + 0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f, + 0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb, + 0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c, + 0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90, + 0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e, + 0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd, + 0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb, + 0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00, +} diff --git a/ecs-agent/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden b/ecs-agent/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden new file mode 100644 index 00000000000..f6502e4b901 --- /dev/null +++ b/ecs-agent/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden @@ -0,0 +1,45 @@ +// Code generated by protoc-gen-go. +// source: gogo.proto +// DO NOT EDIT! + +package gogoproto + +import proto "github.com/gogo/protobuf/proto" +import json "encoding/json" +import math "math" +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +// Reference proto, json, and math imports to suppress error if they are not otherwise used. 
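// --- Editorial sketch; not part of the vendored gogo.pb.go above (illustrative only) ---
// The extension descriptors registered in init() above are what gogo-aware code
// generators consult when deciding, for example, whether to emit a Marshal method
// for a message. The lookup pattern below mirrors the helper.go functions added
// later in this patch: a message-level option wins, otherwise the file-wide *_all
// option supplies the default. The package name and wantsMarshaler are assumptions.
package gogooptiondemo

import (
	"github.com/gogo/protobuf/gogoproto"
	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

// wantsMarshaler reports whether a generator should emit Marshal for msg,
// falling back to the file-wide marshaler_all option when the message does not
// set gogoproto.marshaler explicitly.
func wantsMarshaler(file *descriptor.FileDescriptorProto, msg *descriptor.DescriptorProto) bool {
	return proto.GetBoolExtension(msg.Options, gogoproto.E_Marshaler,
		proto.GetBoolExtension(file.Options, gogoproto.E_MarshalerAll, false))
}
// --- end editorial sketch ---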
+var _ = proto.Marshal +var _ = &json.SyntaxError{} +var _ = math.Inf + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51235, + Name: "gogoproto.nullable", + Tag: "varint,51235,opt,name=nullable", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51236, + Name: "gogoproto.embed", + Tag: "varint,51236,opt,name=embed", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 51237, + Name: "gogoproto.customtype", + Tag: "bytes,51237,opt,name=customtype", +} + +func init() { + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) +} diff --git a/ecs-agent/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/ecs-agent/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto new file mode 100644 index 00000000000..b80c85653f7 --- /dev/null +++ b/ecs-agent/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto @@ -0,0 +1,144 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto2"; +package gogoproto; + +import "google/protobuf/descriptor.proto"; + +option java_package = "com.google.protobuf"; +option java_outer_classname = "GoGoProtos"; +option go_package = "github.com/gogo/protobuf/gogoproto"; + +extend google.protobuf.EnumOptions { + optional bool goproto_enum_prefix = 62001; + optional bool goproto_enum_stringer = 62021; + optional bool enum_stringer = 62022; + optional string enum_customname = 62023; + optional bool enumdecl = 62024; +} + +extend google.protobuf.EnumValueOptions { + optional string enumvalue_customname = 66001; +} + +extend google.protobuf.FileOptions { + optional bool goproto_getters_all = 63001; + optional bool goproto_enum_prefix_all = 63002; + optional bool goproto_stringer_all = 63003; + optional bool verbose_equal_all = 63004; + optional bool face_all = 63005; + optional bool gostring_all = 63006; + optional bool populate_all = 63007; + optional bool stringer_all = 63008; + optional bool onlyone_all = 63009; + + optional bool equal_all = 63013; + optional bool description_all = 63014; + optional bool testgen_all = 63015; + optional bool benchgen_all = 63016; + optional bool marshaler_all = 63017; + optional bool unmarshaler_all = 63018; + optional bool stable_marshaler_all = 63019; + + optional bool sizer_all = 63020; + + optional bool goproto_enum_stringer_all = 63021; + optional bool enum_stringer_all = 63022; + + optional bool unsafe_marshaler_all = 63023; + optional bool unsafe_unmarshaler_all = 63024; + + optional bool goproto_extensions_map_all = 63025; + optional bool goproto_unrecognized_all = 63026; + optional bool gogoproto_import = 63027; + optional bool protosizer_all = 63028; + optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; + optional bool messagename_all = 63033; + + optional bool goproto_sizecache_all = 63034; + optional bool goproto_unkeyed_all = 63035; +} + +extend google.protobuf.MessageOptions { + optional bool goproto_getters = 64001; + optional bool goproto_stringer = 64003; + optional bool verbose_equal = 64004; + optional bool face = 64005; + optional bool gostring = 64006; + optional bool populate = 64007; + optional bool stringer = 67008; + optional bool onlyone = 64009; + + optional bool equal = 64013; + optional bool description = 64014; + optional bool testgen = 64015; + optional bool benchgen = 64016; + optional bool marshaler = 64017; + optional bool unmarshaler = 64018; + optional bool stable_marshaler = 64019; + + optional bool sizer = 64020; + + optional bool unsafe_marshaler = 64023; + optional bool unsafe_unmarshaler = 64024; + + optional bool goproto_extensions_map = 64025; + optional bool goproto_unrecognized = 64026; + + optional bool protosizer = 64028; + optional bool compare = 64029; + + optional bool typedecl = 64030; + + optional bool messagename = 64033; + + optional bool goproto_sizecache = 64034; + optional bool goproto_unkeyed = 64035; +} + +extend google.protobuf.FieldOptions { + optional bool nullable = 65001; + optional bool embed = 65002; + optional string customtype = 65003; + optional string customname = 65004; + optional string jsontag = 65005; + optional string moretags = 65006; + optional string casttype = 65007; + optional string castkey = 65008; + optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; + optional bool wktpointer = 65012; + +} diff --git 
a/ecs-agent/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/ecs-agent/vendor/github.com/gogo/protobuf/gogoproto/helper.go new file mode 100644 index 00000000000..390d4e4be6b --- /dev/null +++ b/ecs-agent/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -0,0 +1,415 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gogoproto + +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +import proto "github.com/gogo/protobuf/proto" + +func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Embed, false) +} + +func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Nullable, true) +} + +func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdtime, false) +} + +func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdduration, false) +} + +func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue" +} + +func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue" +} + +func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value" +} + +func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value" +} + +func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value" +} + +func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, 
E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt32Value" +} + +func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue" +} + +func IsStdString(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue" +} + +func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue" +} + +func IsStdType(field *google_protobuf.FieldDescriptorProto) bool { + return (IsStdTime(field) || IsStdDuration(field) || + IsStdDouble(field) || IsStdFloat(field) || + IsStdInt64(field) || IsStdUInt64(field) || + IsStdInt32(field) || IsStdUInt32(field) || + IsStdBool(field) || + IsStdString(field) || IsStdBytes(field)) +} + +func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) +} + +func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { + nullable := IsNullable(field) + if field.IsMessage() || IsCustomType(field) { + return nullable + } + if proto3 { + return false + } + return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES +} + +func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCustomType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastKey(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastValue(field) + if len(typ) > 0 { + return true + } + return false +} + +func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) +} + +func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) +} + +func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customtype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Casttype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castkey) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := 
proto.GetExtension(field.Options, E_Castvalue) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool { + name := GetCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool { + name := GetEnumCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool { + name := GetEnumValueCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Jsontag) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Moretags) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool + +func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true)) +} + +func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true)) +} + +func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true)) +} + +func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false)) +} + +func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false)) +} + +func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { 
+ return proto.GetBoolExtension(message.Options, E_Equal, proto.GetBoolExtension(file.Options, E_EqualAll, false)) +} + +func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false)) +} + +func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false)) +} + +func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false)) +} + +func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false)) +} + +func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false)) +} + +func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false)) +} + +func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false)) +} + +func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false)) +} + +func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false)) +} + +func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false)) +} + +func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false)) +} + +func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false)) +} + +func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true)) +} + +func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false)) +} + +func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, 
message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false)) +} + +func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false)) +} + +func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true)) +} + +func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true)) +} + +func IsProto3(file *google_protobuf.FileDescriptorProto) bool { + return file.GetSyntax() == "proto3" +} + +func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true) +} + +func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) +} + +func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) +} + +func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false)) +} + +func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true)) +} + +func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true)) +} diff --git a/ecs-agent/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/ecs-agent/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile new file mode 100644 index 00000000000..3496dc99d5a --- /dev/null +++ b/ecs-agent/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile @@ -0,0 +1,36 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + go install github.com/gogo/protobuf/protoc-gen-gostring + protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto + protoc --gostring_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto diff --git a/ecs-agent/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/ecs-agent/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go new file mode 100644 index 00000000000..a85bf1984c6 --- /dev/null +++ b/ecs-agent/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go @@ -0,0 +1,118 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. 
+// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. +package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/gogo/protobuf/proto" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. +func extractFile(gz []byte) (*FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. +func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} + +// Is this field a scalar numeric type? +func (field *FieldDescriptorProto) IsScalar() bool { + if field.Type == nil { + return false + } + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE, + FieldDescriptorProto_TYPE_FLOAT, + FieldDescriptorProto_TYPE_INT64, + FieldDescriptorProto_TYPE_UINT64, + FieldDescriptorProto_TYPE_INT32, + FieldDescriptorProto_TYPE_FIXED64, + FieldDescriptorProto_TYPE_FIXED32, + FieldDescriptorProto_TYPE_BOOL, + FieldDescriptorProto_TYPE_UINT32, + FieldDescriptorProto_TYPE_ENUM, + FieldDescriptorProto_TYPE_SFIXED32, + FieldDescriptorProto_TYPE_SFIXED64, + FieldDescriptorProto_TYPE_SINT32, + FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} diff --git a/ecs-agent/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/ecs-agent/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go new file mode 100644 index 00000000000..18b2a3318a5 --- /dev/null +++ b/ecs-agent/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -0,0 +1,2865 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. 
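// --- Editorial sketch; not part of the vendored descriptor package (illustrative only) ---
// ForMessage, defined in descriptor.go above, ungzips the descriptor bytes embedded in
// generated code and returns both the FileDescriptorProto and the DescriptorProto for a
// message. The import alias pb and the type pb.MyMessage are placeholder assumptions.
package descriptordemo

import (
	"fmt"

	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"

	pb "example.com/placeholder/gen" // hypothetical package of gogo-generated messages
)

func inspect() {
	// ForMessage panics if the embedded descriptor cannot be decoded, so it is
	// normally called only on trusted, generated types.
	fd, md := descriptor.ForMessage(&pb.MyMessage{})
	fmt.Printf("message %q is defined in %q (syntax %s)\n",
		md.GetName(), fd.GetName(), fd.GetSyntax())
}
// --- end editorial sketch ---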
+ FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. + FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} + +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} + +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: 
"LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} + +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} + +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} + +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. + FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} + +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} + +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} + +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} + +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} + +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} + +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. 
+ FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} + +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} + +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} + +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. +type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} + +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} + +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} + +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. 
+type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{0} +} +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(m, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". 
+ Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{1} +} +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(m, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. 
+type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2} +} +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (m *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(m, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m 
!= nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 0} +} +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
+type DescriptorProto_ReservedRange struct {
+	Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+	End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} }
+func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto_ReservedRange) ProtoMessage() {}
+func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{2, 1}
+}
+func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b)
+}
+func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic)
+}
+func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src)
+}
+func (m *DescriptorProto_ReservedRange) XXX_Size() int {
+	return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m)
+}
+func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() {
+	xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo
+
+func (m *DescriptorProto_ReservedRange) GetStart() int32 {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return 0
+}
+
+func (m *DescriptorProto_ReservedRange) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
+
+type ExtensionRangeOptions struct {
+	// The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{3} +} + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. 
This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4} +} +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(m, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{5} +} +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(m, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6} +} +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(m, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
+type EnumDescriptorProto_EnumReservedRange struct {
+	Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+	End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} }
+func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) }
+func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {}
+func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{6, 0}
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int {
+	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo
+
+func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return 0
+}
+
+func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
+
+// Describes a value within an enum.
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{7} +} +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. 
+type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{8} +} +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{9} +} +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(m, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. 
This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10} +} + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (m *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(m, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. 
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetPhpMetadataNamespace() string { + if m != nil && m.PhpMetadataNamespace != nil { + return *m.PhpMetadataNamespace + } + return "" +} + +func (m *FileOptions) GetRubyPackage() string { + if m != nil && m.RubyPackage != nil { + return *m.RubyPackage + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. 
they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{11} +} + +var extRange_MessageOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} + +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (m *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(m, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. 
The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12} +} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (m *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(m, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{13} +} + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (m *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(m, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{14} +} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (m *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(m, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{15} +} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (m *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(m, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{16} +} + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (m *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(m, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17} +} + +var extRange_MethodOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (m *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(m, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions proto.InternalMessageInfo + +const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18} +} +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(m, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18, 0} +} +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. 
For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19} +} +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(m, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. 
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19, 0} +} +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() 
[]int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20} +} +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20, 0} +} +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), 
"google.protobuf.EnumDescriptorProto.EnumReservedRange") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") +} + +func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_308767df5ffe18af) } + +var fileDescriptor_308767df5ffe18af = []byte{ + // 2522 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0x65, 0x7a, 0xec, 0x75, 0x18, 0xef, 0x47, 0x1c, 0xed, 0x66, + 0xe3, 0x24, 0xbb, 0xca, 0xc2, 0x49, 0x9c, 0xac, 0x53, 0x6c, 0x2b, 0x4b, 0x8c, 0x57, 0xa9, 0xbe, + 0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, + 0x83, 0x1e, 0x02, 0xf4, 0x54, 0xa0, 0x7f, 0x40, 0x51, 0x14, 0x3d, 0xf4, 0xb2, 0x40, 0xff, 0x80, + 0x02, 0xed, 0xbd, 0xd7, 0x02, 0xbd, 0xf7, 0x50, 0xa0, 0x05, 0xda, 0x3f, 0xa1, 0xc7, 0x62, 0x66, + 0x48, 0x8a, 0xd4, 0x47, 0xe2, 0x5d, 0x20, 0xd9, 0x93, 0x3d, 0xef, 0xfd, 0xde, 0x9b, 0x37, 0x8f, + 0xbf, 0x79, 0xf3, 0x66, 0x04, 0x82, 0x46, 0x5c, 0xd5, 0xd1, 0x6d, 0xcf, 0x72, 0x2a, 0xb6, 0x63, + 0x79, 0x16, 0x5a, 0x1b, 0x5a, 0xd6, 0xd0, 0x20, 0x7c, 0x74, 0x32, 0x19, 0x94, 0x5b, 0xb0, 0x7e, + 0x4f, 0x37, 0x48, 0x3d, 0x04, 0xf6, 0x88, 0x87, 0xee, 0x40, 0x7a, 0xa0, 0x1b, 0x44, 0x4c, 0xec, + 0xa4, 0x76, 0x0b, 0x7b, 0x1f, 0x56, 0x66, 0x8c, 0x2a, 0x71, 0x8b, 0x2e, 0x15, 0xcb, 0xcc, 0xa2, + 0xfc, 0xef, 0x34, 0x6c, 0x2c, 0xd0, 0x22, 0x04, 0x69, 0x13, 0x8f, 0xa9, 0xc7, 0xc4, 0x6e, 0x5e, + 0x66, 0xff, 0x23, 0x11, 0x56, 0x6c, 0xac, 0x3e, 0xc6, 0x43, 0x22, 0x26, 0x99, 0x38, 0x18, 0xa2, + 0xf7, 0x01, 0x34, 0x62, 0x13, 0x53, 0x23, 0xa6, 0x7a, 0x2a, 0xa6, 0x76, 0x52, 0xbb, 0x79, 0x39, + 0x22, 0x41, 0xd7, 0x60, 0xdd, 0x9e, 0x9c, 0x18, 0xba, 0xaa, 0x44, 0x60, 0xb0, 0x93, 0xda, 0xcd, + 0xc8, 0x02, 0x57, 0xd4, 0xa7, 0xe0, 0xcb, 0xb0, 0xf6, 0x94, 0xe0, 0xc7, 0x51, 0x68, 0x81, 0x41, + 0x4b, 0x54, 0x1c, 0x01, 0xd6, 0xa0, 0x38, 0x26, 0xae, 0x8b, 0x87, 0x44, 0xf1, 0x4e, 0x6d, 0x22, + 0xa6, 0xd9, 0xea, 0x77, 0xe6, 0x56, 0x3f, 0xbb, 0xf2, 0x82, 0x6f, 0xd5, 0x3f, 0xb5, 0x09, 0xaa, + 0x42, 0x9e, 0x98, 0x93, 0x31, 0xf7, 0x90, 0x59, 0x92, 0x3f, 0xc9, 0x9c, 
0x8c, 0x67, 0xbd, 0xe4, + 0xa8, 0x99, 0xef, 0x62, 0xc5, 0x25, 0xce, 0x13, 0x5d, 0x25, 0x62, 0x96, 0x39, 0xb8, 0x3c, 0xe7, + 0xa0, 0xc7, 0xf5, 0xb3, 0x3e, 0x02, 0x3b, 0x54, 0x83, 0x3c, 0x79, 0xe6, 0x11, 0xd3, 0xd5, 0x2d, + 0x53, 0x5c, 0x61, 0x4e, 0x2e, 0x2d, 0xf8, 0x8a, 0xc4, 0xd0, 0x66, 0x5d, 0x4c, 0xed, 0xd0, 0x3e, + 0xac, 0x58, 0xb6, 0xa7, 0x5b, 0xa6, 0x2b, 0xe6, 0x76, 0x12, 0xbb, 0x85, 0xbd, 0x77, 0x17, 0x12, + 0xa1, 0xc3, 0x31, 0x72, 0x00, 0x46, 0x0d, 0x10, 0x5c, 0x6b, 0xe2, 0xa8, 0x44, 0x51, 0x2d, 0x8d, + 0x28, 0xba, 0x39, 0xb0, 0xc4, 0x3c, 0x73, 0x70, 0x61, 0x7e, 0x21, 0x0c, 0x58, 0xb3, 0x34, 0xd2, + 0x30, 0x07, 0x96, 0x5c, 0x72, 0x63, 0x63, 0xb4, 0x05, 0x59, 0xf7, 0xd4, 0xf4, 0xf0, 0x33, 0xb1, + 0xc8, 0x18, 0xe2, 0x8f, 0xca, 0x7f, 0xce, 0xc2, 0xda, 0x59, 0x28, 0x76, 0x17, 0x32, 0x03, 0xba, + 0x4a, 0x31, 0xf9, 0x6d, 0x72, 0xc0, 0x6d, 0xe2, 0x49, 0xcc, 0x7e, 0xc7, 0x24, 0x56, 0xa1, 0x60, + 0x12, 0xd7, 0x23, 0x1a, 0x67, 0x44, 0xea, 0x8c, 0x9c, 0x02, 0x6e, 0x34, 0x4f, 0xa9, 0xf4, 0x77, + 0xa2, 0xd4, 0x03, 0x58, 0x0b, 0x43, 0x52, 0x1c, 0x6c, 0x0e, 0x03, 0x6e, 0x5e, 0x7f, 0x55, 0x24, + 0x15, 0x29, 0xb0, 0x93, 0xa9, 0x99, 0x5c, 0x22, 0xb1, 0x31, 0xaa, 0x03, 0x58, 0x26, 0xb1, 0x06, + 0x8a, 0x46, 0x54, 0x43, 0xcc, 0x2d, 0xc9, 0x52, 0x87, 0x42, 0xe6, 0xb2, 0x64, 0x71, 0xa9, 0x6a, + 0xa0, 0xcf, 0xa6, 0x54, 0x5b, 0x59, 0xc2, 0x94, 0x16, 0xdf, 0x64, 0x73, 0x6c, 0x3b, 0x86, 0x92, + 0x43, 0x28, 0xef, 0x89, 0xe6, 0xaf, 0x2c, 0xcf, 0x82, 0xa8, 0xbc, 0x72, 0x65, 0xb2, 0x6f, 0xc6, + 0x17, 0xb6, 0xea, 0x44, 0x87, 0xe8, 0x03, 0x08, 0x05, 0x0a, 0xa3, 0x15, 0xb0, 0x2a, 0x54, 0x0c, + 0x84, 0x6d, 0x3c, 0x26, 0xdb, 0xcf, 0xa1, 0x14, 0x4f, 0x0f, 0xda, 0x84, 0x8c, 0xeb, 0x61, 0xc7, + 0x63, 0x2c, 0xcc, 0xc8, 0x7c, 0x80, 0x04, 0x48, 0x11, 0x53, 0x63, 0x55, 0x2e, 0x23, 0xd3, 0x7f, + 0xd1, 0x8f, 0xa6, 0x0b, 0x4e, 0xb1, 0x05, 0x7f, 0x34, 0xff, 0x45, 0x63, 0x9e, 0x67, 0xd7, 0xbd, + 0x7d, 0x1b, 0x56, 0x63, 0x0b, 0x38, 0xeb, 0xd4, 0xe5, 0x5f, 0xc0, 0xdb, 0x0b, 0x5d, 0xa3, 0x07, + 0xb0, 0x39, 0x31, 0x75, 0xd3, 0x23, 0x8e, 0xed, 0x10, 0xca, 0x58, 0x3e, 0x95, 0xf8, 0x9f, 0x95, + 0x25, 0x9c, 0x3b, 0x8e, 0xa2, 0xb9, 0x17, 0x79, 0x63, 0x32, 0x2f, 0xbc, 0x9a, 0xcf, 0xfd, 0x77, + 0x45, 0x78, 0xf1, 0xe2, 0xc5, 0x8b, 0x64, 0xf9, 0x37, 0x59, 0xd8, 0x5c, 0xb4, 0x67, 0x16, 0x6e, + 0xdf, 0x2d, 0xc8, 0x9a, 0x93, 0xf1, 0x09, 0x71, 0x58, 0x92, 0x32, 0xb2, 0x3f, 0x42, 0x55, 0xc8, + 0x18, 0xf8, 0x84, 0x18, 0x62, 0x7a, 0x27, 0xb1, 0x5b, 0xda, 0xbb, 0x76, 0xa6, 0x5d, 0x59, 0x69, + 0x52, 0x13, 0x99, 0x5b, 0xa2, 0xcf, 0x21, 0xed, 0x97, 0x68, 0xea, 0xe1, 0xea, 0xd9, 0x3c, 0xd0, + 0xbd, 0x24, 0x33, 0x3b, 0xf4, 0x0e, 0xe4, 0xe9, 0x5f, 0xce, 0x8d, 0x2c, 0x8b, 0x39, 0x47, 0x05, + 0x94, 0x17, 0x68, 0x1b, 0x72, 0x6c, 0x9b, 0x68, 0x24, 0x38, 0xda, 0xc2, 0x31, 0x25, 0x96, 0x46, + 0x06, 0x78, 0x62, 0x78, 0xca, 0x13, 0x6c, 0x4c, 0x08, 0x23, 0x7c, 0x5e, 0x2e, 0xfa, 0xc2, 0x9f, + 0x52, 0x19, 0xba, 0x00, 0x05, 0xbe, 0xab, 0x74, 0x53, 0x23, 0xcf, 0x58, 0xf5, 0xcc, 0xc8, 0x7c, + 0xa3, 0x35, 0xa8, 0x84, 0x4e, 0xff, 0xc8, 0xb5, 0xcc, 0x80, 0x9a, 0x6c, 0x0a, 0x2a, 0x60, 0xd3, + 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x5e, 0xde, 0x2c, 0xa7, 0xca, 0x7f, 0x4a, 0x42, 0x9a, 0xd5, + 0x8b, 0x35, 0x28, 0xf4, 0x1f, 0x76, 0x25, 0xa5, 0xde, 0x39, 0x3e, 0x6c, 0x4a, 0x42, 0x02, 0x95, + 0x00, 0x98, 0xe0, 0x5e, 0xb3, 0x53, 0xed, 0x0b, 0xc9, 0x70, 0xdc, 0x68, 0xf7, 0xf7, 0x6f, 0x0a, + 0xa9, 0xd0, 0xe0, 0x98, 0x0b, 0xd2, 0x51, 0xc0, 0x8d, 0x3d, 0x21, 0x83, 0x04, 0x28, 0x72, 0x07, + 0x8d, 0x07, 0x52, 0x7d, 0xff, 0xa6, 0x90, 0x8d, 0x4b, 0x6e, 0xec, 0x09, 0x2b, 0x68, 0x15, 0xf2, + 
0x4c, 0x72, 0xd8, 0xe9, 0x34, 0x85, 0x5c, 0xe8, 0xb3, 0xd7, 0x97, 0x1b, 0xed, 0x23, 0x21, 0x1f, + 0xfa, 0x3c, 0x92, 0x3b, 0xc7, 0x5d, 0x01, 0x42, 0x0f, 0x2d, 0xa9, 0xd7, 0xab, 0x1e, 0x49, 0x42, + 0x21, 0x44, 0x1c, 0x3e, 0xec, 0x4b, 0x3d, 0xa1, 0x18, 0x0b, 0xeb, 0xc6, 0x9e, 0xb0, 0x1a, 0x4e, + 0x21, 0xb5, 0x8f, 0x5b, 0x42, 0x09, 0xad, 0xc3, 0x2a, 0x9f, 0x22, 0x08, 0x62, 0x6d, 0x46, 0xb4, + 0x7f, 0x53, 0x10, 0xa6, 0x81, 0x70, 0x2f, 0xeb, 0x31, 0xc1, 0xfe, 0x4d, 0x01, 0x95, 0x6b, 0x90, + 0x61, 0xec, 0x42, 0x08, 0x4a, 0xcd, 0xea, 0xa1, 0xd4, 0x54, 0x3a, 0xdd, 0x7e, 0xa3, 0xd3, 0xae, + 0x36, 0x85, 0xc4, 0x54, 0x26, 0x4b, 0x3f, 0x39, 0x6e, 0xc8, 0x52, 0x5d, 0x48, 0x46, 0x65, 0x5d, + 0xa9, 0xda, 0x97, 0xea, 0x42, 0xaa, 0xac, 0xc2, 0xe6, 0xa2, 0x3a, 0xb9, 0x70, 0x67, 0x44, 0x3e, + 0x71, 0x72, 0xc9, 0x27, 0x66, 0xbe, 0xe6, 0x3e, 0xf1, 0xbf, 0x92, 0xb0, 0xb1, 0xe0, 0xac, 0x58, + 0x38, 0xc9, 0x0f, 0x21, 0xc3, 0x29, 0xca, 0x4f, 0xcf, 0x2b, 0x0b, 0x0f, 0x1d, 0x46, 0xd8, 0xb9, + 0x13, 0x94, 0xd9, 0x45, 0x3b, 0x88, 0xd4, 0x92, 0x0e, 0x82, 0xba, 0x98, 0xab, 0xe9, 0x3f, 0x9f, + 0xab, 0xe9, 0xfc, 0xd8, 0xdb, 0x3f, 0xcb, 0xb1, 0xc7, 0x64, 0xdf, 0xae, 0xb6, 0x67, 0x16, 0xd4, + 0xf6, 0xbb, 0xb0, 0x3e, 0xe7, 0xe8, 0xcc, 0x35, 0xf6, 0x97, 0x09, 0x10, 0x97, 0x25, 0xe7, 0x15, + 0x95, 0x2e, 0x19, 0xab, 0x74, 0x77, 0x67, 0x33, 0x78, 0x71, 0xf9, 0x47, 0x98, 0xfb, 0xd6, 0xdf, + 0x24, 0x60, 0x6b, 0x71, 0xa7, 0xb8, 0x30, 0x86, 0xcf, 0x21, 0x3b, 0x26, 0xde, 0xc8, 0x0a, 0xba, + 0xa5, 0x8f, 0x16, 0x9c, 0xc1, 0x54, 0x3d, 0xfb, 0xb1, 0x7d, 0xab, 0xe8, 0x21, 0x9e, 0x5a, 0xd6, + 0xee, 0xf1, 0x68, 0xe6, 0x22, 0xfd, 0x55, 0x12, 0xde, 0x5e, 0xe8, 0x7c, 0x61, 0xa0, 0xef, 0x01, + 0xe8, 0xa6, 0x3d, 0xf1, 0x78, 0x47, 0xc4, 0x0b, 0x6c, 0x9e, 0x49, 0x58, 0xf1, 0xa2, 0xc5, 0x73, + 0xe2, 0x85, 0xfa, 0x14, 0xd3, 0x03, 0x17, 0x31, 0xc0, 0x9d, 0x69, 0xa0, 0x69, 0x16, 0xe8, 0xfb, + 0x4b, 0x56, 0x3a, 0x47, 0xcc, 0x4f, 0x41, 0x50, 0x0d, 0x9d, 0x98, 0x9e, 0xe2, 0x7a, 0x0e, 0xc1, + 0x63, 0xdd, 0x1c, 0xb2, 0x13, 0x24, 0x77, 0x90, 0x19, 0x60, 0xc3, 0x25, 0xf2, 0x1a, 0x57, 0xf7, + 0x02, 0x2d, 0xb5, 0x60, 0x04, 0x72, 0x22, 0x16, 0xd9, 0x98, 0x05, 0x57, 0x87, 0x16, 0xe5, 0x5f, + 0xe7, 0xa1, 0x10, 0xe9, 0xab, 0xd1, 0x45, 0x28, 0x3e, 0xc2, 0x4f, 0xb0, 0x12, 0xdc, 0x95, 0x78, + 0x26, 0x0a, 0x54, 0xd6, 0xf5, 0xef, 0x4b, 0x9f, 0xc2, 0x26, 0x83, 0x58, 0x13, 0x8f, 0x38, 0x8a, + 0x6a, 0x60, 0xd7, 0x65, 0x49, 0xcb, 0x31, 0x28, 0xa2, 0xba, 0x0e, 0x55, 0xd5, 0x02, 0x0d, 0xba, + 0x05, 0x1b, 0xcc, 0x62, 0x3c, 0x31, 0x3c, 0xdd, 0x36, 0x88, 0x42, 0x6f, 0x6f, 0x2e, 0x3b, 0x49, + 0xc2, 0xc8, 0xd6, 0x29, 0xa2, 0xe5, 0x03, 0x68, 0x44, 0x2e, 0xaa, 0xc3, 0x7b, 0xcc, 0x6c, 0x48, + 0x4c, 0xe2, 0x60, 0x8f, 0x28, 0xe4, 0xeb, 0x09, 0x36, 0x5c, 0x05, 0x9b, 0x9a, 0x32, 0xc2, 0xee, + 0x48, 0xdc, 0xa4, 0x0e, 0x0e, 0x93, 0x62, 0x42, 0x3e, 0x4f, 0x81, 0x47, 0x3e, 0x4e, 0x62, 0xb0, + 0xaa, 0xa9, 0x7d, 0x81, 0xdd, 0x11, 0x3a, 0x80, 0x2d, 0xe6, 0xc5, 0xf5, 0x1c, 0xdd, 0x1c, 0x2a, + 0xea, 0x88, 0xa8, 0x8f, 0x95, 0x89, 0x37, 0xb8, 0x23, 0xbe, 0x13, 0x9d, 0x9f, 0x45, 0xd8, 0x63, + 0x98, 0x1a, 0x85, 0x1c, 0x7b, 0x83, 0x3b, 0xa8, 0x07, 0x45, 0xfa, 0x31, 0xc6, 0xfa, 0x73, 0xa2, + 0x0c, 0x2c, 0x87, 0x1d, 0x8d, 0xa5, 0x05, 0xa5, 0x29, 0x92, 0xc1, 0x4a, 0xc7, 0x37, 0x68, 0x59, + 0x1a, 0x39, 0xc8, 0xf4, 0xba, 0x92, 0x54, 0x97, 0x0b, 0x81, 0x97, 0x7b, 0x96, 0x43, 0x09, 0x35, + 0xb4, 0xc2, 0x04, 0x17, 0x38, 0xa1, 0x86, 0x56, 0x90, 0xde, 0x5b, 0xb0, 0xa1, 0xaa, 0x7c, 0xcd, + 0xba, 0xaa, 0xf8, 0x77, 0x2c, 0x57, 0x14, 0x62, 0xc9, 0x52, 0xd5, 0x23, 0x0e, 0xf0, 0x39, 0xee, + 0xa2, 0xcf, 0xe0, 0xed, 
0x69, 0xb2, 0xa2, 0x86, 0xeb, 0x73, 0xab, 0x9c, 0x35, 0xbd, 0x05, 0x1b, + 0xf6, 0xe9, 0xbc, 0x21, 0x8a, 0xcd, 0x68, 0x9f, 0xce, 0x9a, 0xdd, 0x86, 0x4d, 0x7b, 0x64, 0xcf, + 0xdb, 0x5d, 0x8d, 0xda, 0x21, 0x7b, 0x64, 0xcf, 0x1a, 0x5e, 0x62, 0x17, 0x6e, 0x87, 0xa8, 0xd8, + 0x23, 0x9a, 0x78, 0x2e, 0x0a, 0x8f, 0x28, 0xd0, 0x75, 0x10, 0x54, 0x55, 0x21, 0x26, 0x3e, 0x31, + 0x88, 0x82, 0x1d, 0x62, 0x62, 0x57, 0xbc, 0x10, 0x05, 0x97, 0x54, 0x55, 0x62, 0xda, 0x2a, 0x53, + 0xa2, 0xab, 0xb0, 0x6e, 0x9d, 0x3c, 0x52, 0x39, 0x25, 0x15, 0xdb, 0x21, 0x03, 0xfd, 0x99, 0xf8, + 0x21, 0xcb, 0xef, 0x1a, 0x55, 0x30, 0x42, 0x76, 0x99, 0x18, 0x5d, 0x01, 0x41, 0x75, 0x47, 0xd8, + 0xb1, 0x59, 0x4d, 0x76, 0x6d, 0xac, 0x12, 0xf1, 0x12, 0x87, 0x72, 0x79, 0x3b, 0x10, 0xd3, 0x2d, + 0xe1, 0x3e, 0xd5, 0x07, 0x5e, 0xe0, 0xf1, 0x32, 0xdf, 0x12, 0x4c, 0xe6, 0x7b, 0xdb, 0x05, 0x81, + 0xa6, 0x22, 0x36, 0xf1, 0x2e, 0x83, 0x95, 0xec, 0x91, 0x1d, 0x9d, 0xf7, 0x03, 0x58, 0xa5, 0xc8, + 0xe9, 0xa4, 0x57, 0x78, 0x43, 0x66, 0x8f, 0x22, 0x33, 0xde, 0x84, 0x2d, 0x0a, 0x1a, 0x13, 0x0f, + 0x6b, 0xd8, 0xc3, 0x11, 0xf4, 0xc7, 0x0c, 0x4d, 0xf3, 0xde, 0xf2, 0x95, 0xb1, 0x38, 0x9d, 0xc9, + 0xc9, 0x69, 0xc8, 0xac, 0x4f, 0x78, 0x9c, 0x54, 0x16, 0x70, 0xeb, 0xb5, 0x35, 0xdd, 0xe5, 0x03, + 0x28, 0x46, 0x89, 0x8f, 0xf2, 0xc0, 0xa9, 0x2f, 0x24, 0x68, 0x17, 0x54, 0xeb, 0xd4, 0x69, 0xff, + 0xf2, 0x95, 0x24, 0x24, 0x69, 0x1f, 0xd5, 0x6c, 0xf4, 0x25, 0x45, 0x3e, 0x6e, 0xf7, 0x1b, 0x2d, + 0x49, 0x48, 0x45, 0x1b, 0xf6, 0xbf, 0x26, 0xa1, 0x14, 0xbf, 0x7b, 0xa1, 0x1f, 0xc0, 0xb9, 0xe0, + 0xa1, 0xc4, 0x25, 0x9e, 0xf2, 0x54, 0x77, 0xd8, 0x5e, 0x1c, 0x63, 0x7e, 0x2e, 0x86, 0x6c, 0xd8, + 0xf4, 0x51, 0x3d, 0xe2, 0x7d, 0xa9, 0x3b, 0x74, 0xa7, 0x8d, 0xb1, 0x87, 0x9a, 0x70, 0xc1, 0xb4, + 0x14, 0xd7, 0xc3, 0xa6, 0x86, 0x1d, 0x4d, 0x99, 0x3e, 0x51, 0x29, 0x58, 0x55, 0x89, 0xeb, 0x5a, + 0xfc, 0x0c, 0x0c, 0xbd, 0xbc, 0x6b, 0x5a, 0x3d, 0x1f, 0x3c, 0x3d, 0x1c, 0xaa, 0x3e, 0x74, 0x86, + 0xb9, 0xa9, 0x65, 0xcc, 0x7d, 0x07, 0xf2, 0x63, 0x6c, 0x2b, 0xc4, 0xf4, 0x9c, 0x53, 0xd6, 0x71, + 0xe7, 0xe4, 0xdc, 0x18, 0xdb, 0x12, 0x1d, 0xbf, 0x99, 0x8b, 0xcf, 0x3f, 0x52, 0x50, 0x8c, 0x76, + 0xdd, 0xf4, 0x12, 0xa3, 0xb2, 0x03, 0x2a, 0xc1, 0x4a, 0xd8, 0x07, 0x2f, 0xed, 0xd1, 0x2b, 0x35, + 0x7a, 0x72, 0x1d, 0x64, 0x79, 0x2f, 0x2c, 0x73, 0x4b, 0xda, 0x35, 0x50, 0x6a, 0x11, 0xde, 0x7b, + 0xe4, 0x64, 0x7f, 0x84, 0x8e, 0x20, 0xfb, 0xc8, 0x65, 0xbe, 0xb3, 0xcc, 0xf7, 0x87, 0x2f, 0xf7, + 0x7d, 0xbf, 0xc7, 0x9c, 0xe7, 0xef, 0xf7, 0x94, 0x76, 0x47, 0x6e, 0x55, 0x9b, 0xb2, 0x6f, 0x8e, + 0xce, 0x43, 0xda, 0xc0, 0xcf, 0x4f, 0xe3, 0x67, 0x1c, 0x13, 0x9d, 0x35, 0xf1, 0xe7, 0x21, 0xfd, + 0x94, 0xe0, 0xc7, 0xf1, 0x93, 0x85, 0x89, 0x5e, 0x23, 0xf5, 0xaf, 0x43, 0x86, 0xe5, 0x0b, 0x01, + 0xf8, 0x19, 0x13, 0xde, 0x42, 0x39, 0x48, 0xd7, 0x3a, 0x32, 0xa5, 0xbf, 0x00, 0x45, 0x2e, 0x55, + 0xba, 0x0d, 0xa9, 0x26, 0x09, 0xc9, 0xf2, 0x2d, 0xc8, 0xf2, 0x24, 0xd0, 0xad, 0x11, 0xa6, 0x41, + 0x78, 0xcb, 0x1f, 0xfa, 0x3e, 0x12, 0x81, 0xf6, 0xb8, 0x75, 0x28, 0xc9, 0x42, 0x32, 0xfa, 0x79, + 0x5d, 0x28, 0x46, 0x1b, 0xee, 0x37, 0xc3, 0xa9, 0xbf, 0x24, 0xa0, 0x10, 0x69, 0xa0, 0x69, 0xe7, + 0x83, 0x0d, 0xc3, 0x7a, 0xaa, 0x60, 0x43, 0xc7, 0xae, 0x4f, 0x0a, 0x60, 0xa2, 0x2a, 0x95, 0x9c, + 0xf5, 0xa3, 0xbd, 0x91, 0xe0, 0x7f, 0x9f, 0x00, 0x61, 0xb6, 0x77, 0x9d, 0x09, 0x30, 0xf1, 0xbd, + 0x06, 0xf8, 0xbb, 0x04, 0x94, 0xe2, 0x0d, 0xeb, 0x4c, 0x78, 0x17, 0xbf, 0xd7, 0xf0, 0xfe, 0x99, + 0x84, 0xd5, 0x58, 0x9b, 0x7a, 0xd6, 0xe8, 0xbe, 0x86, 0x75, 0x5d, 0x23, 0x63, 0xdb, 0xf2, 0x88, + 0xa9, 0x9e, 0x2a, 0x06, 0x79, 0x42, 0x0c, 0xb1, 
0xcc, 0x0a, 0xc5, 0xf5, 0x97, 0x37, 0xc2, 0x95, + 0xc6, 0xd4, 0xae, 0x49, 0xcd, 0x0e, 0x36, 0x1a, 0x75, 0xa9, 0xd5, 0xed, 0xf4, 0xa5, 0x76, 0xed, + 0xa1, 0x72, 0xdc, 0xfe, 0x71, 0xbb, 0xf3, 0x65, 0x5b, 0x16, 0xf4, 0x19, 0xd8, 0x6b, 0xdc, 0xea, + 0x5d, 0x10, 0x66, 0x83, 0x42, 0xe7, 0x60, 0x51, 0x58, 0xc2, 0x5b, 0x68, 0x03, 0xd6, 0xda, 0x1d, + 0xa5, 0xd7, 0xa8, 0x4b, 0x8a, 0x74, 0xef, 0x9e, 0x54, 0xeb, 0xf7, 0xf8, 0xd3, 0x46, 0x88, 0xee, + 0xc7, 0x37, 0xf5, 0x6f, 0x53, 0xb0, 0xb1, 0x20, 0x12, 0x54, 0xf5, 0x2f, 0x25, 0xfc, 0x9e, 0xf4, + 0xc9, 0x59, 0xa2, 0xaf, 0xd0, 0xae, 0xa0, 0x8b, 0x1d, 0xcf, 0xbf, 0xc3, 0x5c, 0x01, 0x9a, 0x25, + 0xd3, 0xd3, 0x07, 0x3a, 0x71, 0xfc, 0x97, 0x20, 0x7e, 0x53, 0x59, 0x9b, 0xca, 0xf9, 0x63, 0xd0, + 0xc7, 0x80, 0x6c, 0xcb, 0xd5, 0x3d, 0xfd, 0x09, 0x51, 0x74, 0x33, 0x78, 0x36, 0xa2, 0x37, 0x97, + 0xb4, 0x2c, 0x04, 0x9a, 0x86, 0xe9, 0x85, 0x68, 0x93, 0x0c, 0xf1, 0x0c, 0x9a, 0x16, 0xf0, 0x94, + 0x2c, 0x04, 0x9a, 0x10, 0x7d, 0x11, 0x8a, 0x9a, 0x35, 0xa1, 0xed, 0x1c, 0xc7, 0xd1, 0xf3, 0x22, + 0x21, 0x17, 0xb8, 0x2c, 0x84, 0xf8, 0x8d, 0xfa, 0xf4, 0xbd, 0xaa, 0x28, 0x17, 0xb8, 0x8c, 0x43, + 0x2e, 0xc3, 0x1a, 0x1e, 0x0e, 0x1d, 0xea, 0x3c, 0x70, 0xc4, 0xaf, 0x1e, 0xa5, 0x50, 0xcc, 0x80, + 0xdb, 0xf7, 0x21, 0x17, 0xe4, 0x81, 0x1e, 0xc9, 0x34, 0x13, 0x8a, 0xcd, 0xef, 0xd3, 0xc9, 0xdd, + 0xbc, 0x9c, 0x33, 0x03, 0xe5, 0x45, 0x28, 0xea, 0xae, 0x32, 0x7d, 0x7e, 0x4f, 0xee, 0x24, 0x77, + 0x73, 0x72, 0x41, 0x77, 0xc3, 0xa7, 0xcb, 0xf2, 0x37, 0x49, 0x28, 0xc5, 0x7f, 0x3e, 0x40, 0x75, + 0xc8, 0x19, 0x96, 0x8a, 0x19, 0xb5, 0xf8, 0x6f, 0x57, 0xbb, 0xaf, 0xf8, 0xc5, 0xa1, 0xd2, 0xf4, + 0xf1, 0x72, 0x68, 0xb9, 0xfd, 0xb7, 0x04, 0xe4, 0x02, 0x31, 0xda, 0x82, 0xb4, 0x8d, 0xbd, 0x11, + 0x73, 0x97, 0x39, 0x4c, 0x0a, 0x09, 0x99, 0x8d, 0xa9, 0xdc, 0xb5, 0xb1, 0xc9, 0x28, 0xe0, 0xcb, + 0xe9, 0x98, 0x7e, 0x57, 0x83, 0x60, 0x8d, 0xdd, 0x6b, 0xac, 0xf1, 0x98, 0x98, 0x9e, 0x1b, 0x7c, + 0x57, 0x5f, 0x5e, 0xf3, 0xc5, 0xe8, 0x1a, 0xac, 0x7b, 0x0e, 0xd6, 0x8d, 0x18, 0x36, 0xcd, 0xb0, + 0x42, 0xa0, 0x08, 0xc1, 0x07, 0x70, 0x3e, 0xf0, 0xab, 0x11, 0x0f, 0xab, 0x23, 0xa2, 0x4d, 0x8d, + 0xb2, 0xec, 0xfd, 0xe2, 0x9c, 0x0f, 0xa8, 0xfb, 0xfa, 0xc0, 0xb6, 0xfc, 0xf7, 0x04, 0xac, 0x07, + 0x37, 0x31, 0x2d, 0x4c, 0x56, 0x0b, 0x00, 0x9b, 0xa6, 0xe5, 0x45, 0xd3, 0x35, 0x4f, 0xe5, 0x39, + 0xbb, 0x4a, 0x35, 0x34, 0x92, 0x23, 0x0e, 0xb6, 0xc7, 0x00, 0x53, 0xcd, 0xd2, 0xb4, 0x5d, 0x80, + 0x82, 0xff, 0xdb, 0x10, 0xfb, 0x81, 0x91, 0xdf, 0xdd, 0x81, 0x8b, 0xe8, 0x95, 0x0d, 0x6d, 0x42, + 0xe6, 0x84, 0x0c, 0x75, 0xd3, 0x7f, 0xf1, 0xe5, 0x83, 0xe0, 0x85, 0x25, 0x1d, 0xbe, 0xb0, 0x1c, + 0xfe, 0x0c, 0x36, 0x54, 0x6b, 0x3c, 0x1b, 0xee, 0xa1, 0x30, 0xf3, 0x7e, 0xe0, 0x7e, 0x91, 0xf8, + 0x0a, 0xa6, 0x2d, 0xe6, 0xff, 0x12, 0x89, 0x3f, 0x24, 0x53, 0x47, 0xdd, 0xc3, 0x3f, 0x26, 0xb7, + 0x8f, 0xb8, 0x69, 0x37, 0x58, 0xa9, 0x4c, 0x06, 0x06, 0x51, 0x69, 0xf4, 0xff, 0x0f, 0x00, 0x00, + 0xff, 0xff, 0x88, 0x17, 0xc1, 0xbe, 0x38, 0x1d, 0x00, 0x00, +} diff --git a/ecs-agent/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/ecs-agent/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go new file mode 100644 index 00000000000..165b2110df8 --- /dev/null +++ b/ecs-agent/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -0,0 +1,752 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + proto "github.com/gogo/protobuf/proto" + math "math" + reflect "reflect" + sort "sort" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (this *FileDescriptorSet) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.FileDescriptorSet{") + if this.File != nil { + s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 16) + s = append(s, "&descriptor.FileDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Package != nil { + s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n") + } + if this.Dependency != nil { + s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n") + } + if this.PublicDependency != nil { + s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n") + } + if this.WeakDependency != nil { + s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n") + } + if this.MessageType != nil { + s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.Service != nil { + s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceCodeInfo != nil { + s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n") + } + if this.Syntax != nil { + s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.DescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Field != nil { + s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.NestedType != nil { + s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.ExtensionRange != nil { + s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n") + } + if this.OneofDecl != nil { + s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != 
nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ExtensionRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.DescriptorProto_ReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ExtensionRangeOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.ExtensionRangeOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.FieldDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Label != nil { + s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n") + } + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n") + } + if this.TypeName != nil { + s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") + } + if this.Extendee != nil { + s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n") + } + if this.DefaultValue != nil { + s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n") + } + if this.OneofIndex != nil { + s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n") + } + if this.JsonName != nil { + s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: 
"+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.OneofDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.EnumDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto_EnumReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumValueDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.ServiceDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Method != nil { + s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = 
append(s, "}") + return strings.Join(s, "") +} +func (this *MethodDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&descriptor.MethodDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.InputType != nil { + s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n") + } + if this.OutputType != nil { + s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ClientStreaming != nil { + s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n") + } + if this.ServerStreaming != nil { + s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 25) + s = append(s, "&descriptor.FileOptions{") + if this.JavaPackage != nil { + s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") + } + if this.JavaOuterClassname != nil { + s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n") + } + if this.JavaMultipleFiles != nil { + s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n") + } + if this.JavaGenerateEqualsAndHash != nil { + s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n") + } + if this.JavaStringCheckUtf8 != nil { + s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") + } + if this.OptimizeFor != nil { + s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n") + } + if this.GoPackage != nil { + s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") + } + if this.CcGenericServices != nil { + s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n") + } + if this.JavaGenericServices != nil { + s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n") + } + if this.PyGenericServices != nil { + s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") + } + if this.PhpGenericServices != nil { + s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.CcEnableArenas != nil { + s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n") + } + if this.ObjcClassPrefix != nil { + s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n") + } + if this.CsharpNamespace != nil { + s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") + } + if this.SwiftPrefix != nil { + s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") + } + if 
this.PhpClassPrefix != nil { + s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n") + } + if this.PhpNamespace != nil { + s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") + } + if this.PhpMetadataNamespace != nil { + s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n") + } + if this.RubyPackage != nil { + s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MessageOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.MessageOptions{") + if this.MessageSetWireFormat != nil { + s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n") + } + if this.NoStandardDescriptorAccessor != nil { + s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.MapEntry != nil { + s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.FieldOptions{") + if this.Ctype != nil { + s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n") + } + if this.Packed != nil { + s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") + } + if this.Jstype != nil { + s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n") + } + if this.Lazy != nil { + s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.Weak != nil { + s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.OneofOptions{") + if this.UninterpretedOption != nil { + s = append(s, 
"UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumOptions{") + if this.AllowAlias != nil { + s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumValueOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.ServiceOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.MethodOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.IdempotencyLevel != nil { + s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.UninterpretedOption{") + if this.Name 
!= nil { + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + } + if this.IdentifierValue != nil { + s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n") + } + if this.PositiveIntValue != nil { + s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n") + } + if this.NegativeIntValue != nil { + s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n") + } + if this.DoubleValue != nil { + s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n") + } + if this.StringValue != nil { + s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n") + } + if this.AggregateValue != nil { + s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption_NamePart) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.UninterpretedOption_NamePart{") + if this.NamePart != nil { + s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n") + } + if this.IsExtension != nil { + s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.SourceCodeInfo{") + if this.Location != nil { + s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo_Location) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.SourceCodeInfo_Location{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.Span != nil { + s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n") + } + if this.LeadingComments != nil { + s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n") + } + if this.TrailingComments != nil { + s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n") + } + if this.LeadingDetachedComments != nil { + s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.GeneratedCodeInfo{") + if this.Annotation != nil { + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func 
(this *GeneratedCodeInfo_Annotation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.SourceFile != nil { + s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") + } + if this.Begin != nil { + s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDescriptor(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string { + e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m) + if e == nil { + return "nil" + } + s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{" + keys := make([]int, 0, len(e)) + for k := range e { + keys = append(keys, int(k)) + } + sort.Ints(keys) + ss := []string{} + for _, k := range keys { + ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) + } + s += strings.Join(ss, ",") + "})" + return s +} diff --git a/ecs-agent/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/ecs-agent/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go new file mode 100644 index 00000000000..e0846a357d5 --- /dev/null +++ b/ecs-agent/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go @@ -0,0 +1,390 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
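The GoString methods above are generated by gogo/protobuf's gostring plugin: they let fmt's %#v verb print a descriptor as reconstructable Go source, with scalar pointer fields rendered through valueToGoStringDescriptor and extension maps through extensionToGoStringDescriptor. A minimal sketch of that usage, assuming an illustrative example.proto descriptor (the file name, package, and field values below are assumptions for demonstration, not taken from this patch):

// Illustrative sketch: printing a descriptor through its generated GoString method.
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	fd := &descriptor.FileDescriptorProto{
		Name:    proto.String("example.proto"),
		Package: proto.String("example"),
		Syntax:  proto.String("proto3"),
	}
	// %#v dispatches to the generated GoString(), which prints
	// "&descriptor.FileDescriptorProto{...}" with one field per line and
	// scalar pointers rendered as func(v string) *string { return &v }("...").
	fmt.Printf("%#v\n", fd)
}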
+ +package descriptor + +import ( + "strings" +) + +func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) { + if !msg.GetOptions().GetMapEntry() { + return nil, nil + } + return msg.GetField()[0], msg.GetField()[1] +} + +func dotToUnderscore(r rune) rune { + if r == '.' { + return '_' + } + return r +} + +func (field *FieldDescriptorProto) WireType() (wire int) { + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE: + return 1 + case FieldDescriptorProto_TYPE_FLOAT: + return 5 + case FieldDescriptorProto_TYPE_INT64: + return 0 + case FieldDescriptorProto_TYPE_UINT64: + return 0 + case FieldDescriptorProto_TYPE_INT32: + return 0 + case FieldDescriptorProto_TYPE_UINT32: + return 0 + case FieldDescriptorProto_TYPE_FIXED64: + return 1 + case FieldDescriptorProto_TYPE_FIXED32: + return 5 + case FieldDescriptorProto_TYPE_BOOL: + return 0 + case FieldDescriptorProto_TYPE_STRING: + return 2 + case FieldDescriptorProto_TYPE_GROUP: + return 2 + case FieldDescriptorProto_TYPE_MESSAGE: + return 2 + case FieldDescriptorProto_TYPE_BYTES: + return 2 + case FieldDescriptorProto_TYPE_ENUM: + return 0 + case FieldDescriptorProto_TYPE_SFIXED32: + return 5 + case FieldDescriptorProto_TYPE_SFIXED64: + return 1 + case FieldDescriptorProto_TYPE_SINT32: + return 0 + case FieldDescriptorProto_TYPE_SINT64: + return 0 + } + panic("unreachable") +} + +func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { + packed := field.IsPacked() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { + packed := field.IsPacked3() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey() []byte { + x := field.GetKeyUint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (field *FieldDescriptorProto) GetKey3() []byte { + x := field.GetKey3Uint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { + msg := desc.GetMessage(packageName, messageName) + if msg == nil { + return nil + } + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto { + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+".")) + if nes != nil { + return nes + } + } + return nil +} + +func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+".")) + if res != nil { + return res + } + } + return nil +} + +func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto { + for _, file := range 
desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + if msg.GetName()+"."+nes.GetName() == typeName { + return nes + } + } + } + } + return nil +} + +func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + if msg.GetName()+"."+nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + } + } + return false +} + +func (msg *DescriptorProto) IsExtendable() bool { + return len(msg.GetExtensionRange()) > 0 +} + +func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." + typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetName() == fieldName { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." 
+ typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetNumber() == fieldNum { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", "" + } + field := parent.GetFieldDescriptor(fieldName) + if field == nil { + var extPackageName string + extPackageName, field = desc.FindExtension(packageName, typeName, fieldName) + if field == nil { + return "", "" + } + packageName = extPackageName + } + typeNames := strings.Split(field.GetTypeName(), ".") + if len(typeNames) == 1 { + msg := desc.GetMessage(packageName, typeName) + if msg == nil { + return "", "" + } + return packageName, msg.GetName() + } + if len(typeNames) > 2 { + for i := 1; i < len(typeNames)-1; i++ { + packageName = strings.Join(typeNames[1:len(typeNames)-i], ".") + typeName = strings.Join(typeNames[len(typeNames)-i:], ".") + msg := desc.GetMessage(packageName, typeName) + if msg != nil { + typeNames := strings.Split(msg.GetName(), ".") + if len(typeNames) == 1 { + return packageName, msg.GetName() + } + return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1] + } + } + } + return "", "" +} + +func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto { + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, enum := range file.GetEnumType() { + if enum.GetName() == typeName { + return enum + } + } + } + return nil +} + +func (f *FieldDescriptorProto) IsEnum() bool { + return *f.Type == FieldDescriptorProto_TYPE_ENUM +} + +func (f *FieldDescriptorProto) IsMessage() bool { + return *f.Type == FieldDescriptorProto_TYPE_MESSAGE +} + +func (f *FieldDescriptorProto) IsBytes() bool { + return *f.Type == FieldDescriptorProto_TYPE_BYTES +} + +func (f *FieldDescriptorProto) IsRepeated() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED +} + +func (f *FieldDescriptorProto) IsString() bool { + return *f.Type == FieldDescriptorProto_TYPE_STRING +} + +func (f *FieldDescriptorProto) IsBool() bool { + return *f.Type == FieldDescriptorProto_TYPE_BOOL +} + +func (f *FieldDescriptorProto) IsRequired() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED +} + +func (f *FieldDescriptorProto) IsPacked() bool { + return f.Options != nil && f.GetOptions().GetPacked() +} + +func (f *FieldDescriptorProto) IsPacked3() bool { + if f.IsRepeated() && f.IsScalar() { + if f.Options == nil || f.GetOptions().Packed == nil { + return true + } + return f.Options != nil && f.GetOptions().GetPacked() + } + return false +} + +func (m *DescriptorProto) HasExtension() bool { + return len(m.ExtensionRange) > 0 +} diff --git a/ecs-agent/vendor/github.com/golang/groupcache/LICENSE 
b/ecs-agent/vendor/github.com/golang/groupcache/LICENSE new file mode 100644 index 00000000000..37ec93a14fd --- /dev/null +++ b/ecs-agent/vendor/github.com/golang/groupcache/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. 
+ +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/ecs-agent/vendor/github.com/golang/groupcache/lru/lru.go b/ecs-agent/vendor/github.com/golang/groupcache/lru/lru.go new file mode 100644 index 00000000000..eac1c7664f9 --- /dev/null +++ b/ecs-agent/vendor/github.com/golang/groupcache/lru/lru.go @@ -0,0 +1,133 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package lru implements an LRU cache. +package lru + +import "container/list" + +// Cache is an LRU cache. It is not safe for concurrent access. +type Cache struct { + // MaxEntries is the maximum number of cache entries before + // an item is evicted. Zero means no limit. + MaxEntries int + + // OnEvicted optionally specifies a callback function to be + // executed when an entry is purged from the cache. + OnEvicted func(key Key, value interface{}) + + ll *list.List + cache map[interface{}]*list.Element +} + +// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators +type Key interface{} + +type entry struct { + key Key + value interface{} +} + +// New creates a new Cache. +// If maxEntries is zero, the cache has no limit and it's assumed +// that eviction is done by the caller. +func New(maxEntries int) *Cache { + return &Cache{ + MaxEntries: maxEntries, + ll: list.New(), + cache: make(map[interface{}]*list.Element), + } +} + +// Add adds a value to the cache. +func (c *Cache) Add(key Key, value interface{}) { + if c.cache == nil { + c.cache = make(map[interface{}]*list.Element) + c.ll = list.New() + } + if ee, ok := c.cache[key]; ok { + c.ll.MoveToFront(ee) + ee.Value.(*entry).value = value + return + } + ele := c.ll.PushFront(&entry{key, value}) + c.cache[key] = ele + if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { + c.RemoveOldest() + } +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key Key) (value interface{}, ok bool) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.ll.MoveToFront(ele) + return ele.Value.(*entry).value, true + } + return +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key Key) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.removeElement(ele) + } +} + +// RemoveOldest removes the oldest item from the cache. 
+func (c *Cache) RemoveOldest() { + if c.cache == nil { + return + } + ele := c.ll.Back() + if ele != nil { + c.removeElement(ele) + } +} + +func (c *Cache) removeElement(e *list.Element) { + c.ll.Remove(e) + kv := e.Value.(*entry) + delete(c.cache, kv.key) + if c.OnEvicted != nil { + c.OnEvicted(kv.key, kv.value) + } +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + if c.cache == nil { + return 0 + } + return c.ll.Len() +} + +// Clear purges all stored items from the cache. +func (c *Cache) Clear() { + if c.OnEvicted != nil { + for _, e := range c.cache { + kv := e.Value.(*entry) + c.OnEvicted(kv.key, kv.value) + } + } + c.ll = nil + c.cache = nil +} diff --git a/ecs-agent/vendor/github.com/golang/mock/gomock/call.go b/ecs-agent/vendor/github.com/golang/mock/gomock/call.go index 7345f6540f6..13c9f44b1ef 100644 --- a/ecs-agent/vendor/github.com/golang/mock/gomock/call.go +++ b/ecs-agent/vendor/github.com/golang/mock/gomock/call.go @@ -50,19 +50,22 @@ func newCall(t TestHelper, receiver interface{}, method string, methodType refle t.Helper() // TODO: check arity, types. - margs := make([]Matcher, len(args)) + mArgs := make([]Matcher, len(args)) for i, arg := range args { if m, ok := arg.(Matcher); ok { - margs[i] = m + mArgs[i] = m } else if arg == nil { // Handle nil specially so that passing a nil interface value // will match the typed nils of concrete args. - margs[i] = Nil() + mArgs[i] = Nil() } else { - margs[i] = Eq(arg) + mArgs[i] = Eq(arg) } } + // callerInfo's skip should be updated if the number of calls between the user's test + // and this line changes, i.e. this code is wrapped in another anonymous function. + // 0 is us, 1 is RecordCallWithMethodType(), 2 is the generated recorder, and 3 is the user's test. origin := callerInfo(3) actions := []func([]interface{}) []interface{}{func([]interface{}) []interface{} { // Synthesize the zero value for each of the return args' types. @@ -73,7 +76,7 @@ func newCall(t TestHelper, receiver interface{}, method string, methodType refle return rets }} return &Call{t: t, receiver: receiver, method: method, methodType: methodType, - args: margs, origin: origin, minCalls: 1, maxCalls: 1, actions: actions} + args: mArgs, origin: origin, minCalls: 1, maxCalls: 1, actions: actions} } // AnyTimes allows the expectation to be called 0 or more times @@ -110,19 +113,25 @@ func (c *Call) DoAndReturn(f interface{}) *Call { v := reflect.ValueOf(f) c.addAction(func(args []interface{}) []interface{} { - vargs := make([]reflect.Value, len(args)) + c.t.Helper() + vArgs := make([]reflect.Value, len(args)) ft := v.Type() + if c.methodType.NumIn() != ft.NumIn() { + c.t.Fatalf("wrong number of arguments in DoAndReturn func for %T.%v: got %d, want %d [%s]", + c.receiver, c.method, ft.NumIn(), c.methodType.NumIn(), c.origin) + return nil + } for i := 0; i < len(args); i++ { if args[i] != nil { - vargs[i] = reflect.ValueOf(args[i]) + vArgs[i] = reflect.ValueOf(args[i]) } else { // Use the zero value for the arg. 
- vargs[i] = reflect.Zero(ft.In(i)) + vArgs[i] = reflect.Zero(ft.In(i)) } } - vrets := v.Call(vargs) - rets := make([]interface{}, len(vrets)) - for i, ret := range vrets { + vRets := v.Call(vArgs) + rets := make([]interface{}, len(vRets)) + for i, ret := range vRets { rets[i] = ret.Interface() } return rets @@ -139,17 +148,23 @@ func (c *Call) Do(f interface{}) *Call { v := reflect.ValueOf(f) c.addAction(func(args []interface{}) []interface{} { - vargs := make([]reflect.Value, len(args)) + c.t.Helper() + if c.methodType.NumIn() != v.Type().NumIn() { + c.t.Fatalf("wrong number of arguments in Do func for %T.%v: got %d, want %d [%s]", + c.receiver, c.method, v.Type().NumIn(), c.methodType.NumIn(), c.origin) + return nil + } + vArgs := make([]reflect.Value, len(args)) ft := v.Type() for i := 0; i < len(args); i++ { if args[i] != nil { - vargs[i] = reflect.ValueOf(args[i]) + vArgs[i] = reflect.ValueOf(args[i]) } else { // Use the zero value for the arg. - vargs[i] = reflect.Zero(ft.In(i)) + vArgs[i] = reflect.Zero(ft.In(i)) } } - v.Call(vargs) + v.Call(vArgs) return nil }) return c @@ -301,14 +316,9 @@ func (c *Call) matches(args []interface{}) error { for i, m := range c.args { if !m.Matches(args[i]) { - got := fmt.Sprintf("%v", args[i]) - if gs, ok := m.(GotFormatter); ok { - got = gs.Got(args[i]) - } - return fmt.Errorf( "expected call at %s doesn't match the argument at index %d.\nGot: %v\nWant: %v", - c.origin, i, got, m, + c.origin, i, formatGottenArg(m, args[i]), m, ) } } @@ -331,7 +341,7 @@ func (c *Call) matches(args []interface{}) error { // Non-variadic args if !m.Matches(args[i]) { return fmt.Errorf("expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v", - c.origin, strconv.Itoa(i), args[i], m) + c.origin, strconv.Itoa(i), formatGottenArg(m, args[i]), m) } continue } @@ -355,12 +365,12 @@ func (c *Call) matches(args []interface{}) error { // matches all the remaining arguments or the lack of any. // Convert the remaining arguments, if any, into a slice of the // expected type. - vargsType := c.methodType.In(c.methodType.NumIn() - 1) - vargs := reflect.MakeSlice(vargsType, 0, len(args)-i) + vArgsType := c.methodType.In(c.methodType.NumIn() - 1) + vArgs := reflect.MakeSlice(vArgsType, 0, len(args)-i) for _, arg := range args[i:] { - vargs = reflect.Append(vargs, reflect.ValueOf(arg)) + vArgs = reflect.Append(vArgs, reflect.ValueOf(arg)) } - if m.Matches(vargs.Interface()) { + if m.Matches(vArgs.Interface()) { // Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, gomock.Any()) // Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, someSliceMatcher) // Got Foo(a, b) want Foo(matcherA, matcherB, gomock.Any()) @@ -373,16 +383,16 @@ func (c *Call) matches(args []interface{}) error { // Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD, matcherE) // Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, matcherC, matcherD) // Got Foo(a, b, c) want Foo(matcherA, matcherB) - return fmt.Errorf("Expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v", - c.origin, strconv.Itoa(i), args[i:], c.args[i]) + return fmt.Errorf("expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v", + c.origin, strconv.Itoa(i), formatGottenArg(m, args[i:]), c.args[i]) } } // Check that all prerequisite calls have been satisfied. 
for _, preReqCall := range c.preReqs { if !preReqCall.satisfied() { - return fmt.Errorf("Expected call at %s doesn't have a prerequisite call satisfied:\n%v\nshould be called before:\n%v", + return fmt.Errorf("expected call at %s doesn't have a prerequisite call satisfied:\n%v\nshould be called before:\n%v", c.origin, preReqCall, c) } } @@ -425,3 +435,11 @@ func setSlice(arg interface{}, v reflect.Value) { func (c *Call) addAction(action func([]interface{}) []interface{}) { c.actions = append(c.actions, action) } + +func formatGottenArg(m Matcher, arg interface{}) string { + got := fmt.Sprintf("%v (%T)", arg, arg) + if gs, ok := m.(GotFormatter); ok { + got = gs.Got(arg) + } + return got +} diff --git a/ecs-agent/vendor/github.com/golang/mock/gomock/callset.go b/ecs-agent/vendor/github.com/golang/mock/gomock/callset.go index b046b525e96..49dba787a40 100644 --- a/ecs-agent/vendor/github.com/golang/mock/gomock/callset.go +++ b/ecs-agent/vendor/github.com/golang/mock/gomock/callset.go @@ -16,6 +16,7 @@ package gomock import ( "bytes" + "errors" "fmt" ) @@ -84,14 +85,18 @@ func (cs callSet) FindMatch(receiver interface{}, method string, args []interfac for _, call := range exhausted { if err := call.matches(args); err != nil { _, _ = fmt.Fprintf(&callsErrors, "\n%v", err) + continue } + _, _ = fmt.Fprintf( + &callsErrors, "all expected calls for method %q have been exhausted", method, + ) } if len(expected)+len(exhausted) == 0 { _, _ = fmt.Fprintf(&callsErrors, "there are no expected calls of the method %q for that receiver", method) } - return nil, fmt.Errorf(callsErrors.String()) + return nil, errors.New(callsErrors.String()) } // Failures returns the calls that are not satisfied. diff --git a/ecs-agent/vendor/github.com/golang/mock/gomock/controller.go b/ecs-agent/vendor/github.com/golang/mock/gomock/controller.go index d7c3c656ae0..f054200d56c 100644 --- a/ecs-agent/vendor/github.com/golang/mock/gomock/controller.go +++ b/ecs-agent/vendor/github.com/golang/mock/gomock/controller.go @@ -50,9 +50,6 @@ // mockObj.EXPECT().SomeMethod(2, "second"), // mockObj.EXPECT().SomeMethod(3, "third"), // ) -// -// TODO: -// - Handle different argument/return types (e.g. ..., chan, map, interface). package gomock import ( @@ -77,6 +74,15 @@ type TestHelper interface { Helper() } +// cleanuper is used to check if TestHelper also has the `Cleanup` method. A +// common pattern is to pass in a `*testing.T` to +// `NewController(t TestReporter)`. In Go 1.14+, `*testing.T` has a cleanup +// method. This can be utilized to call `Finish()` so the caller of this library +// does not have to. +type cleanuper interface { + Cleanup(func()) +} + // A Controller represents the top-level control of a mock ecosystem. It // defines the scope and lifetime of mock objects, as well as their // expectations. It is safe to call Controller's methods from multiple @@ -115,29 +121,43 @@ type Controller struct { // NewController returns a new Controller. It is the preferred way to create a // Controller. +// +// New in go1.14+, if you are passing a *testing.T into this function you no +// longer need to call ctrl.Finish() in your test methods. 
func NewController(t TestReporter) *Controller { h, ok := t.(TestHelper) if !ok { - h = nopTestHelper{t} + h = &nopTestHelper{t} } - - return &Controller{ + ctrl := &Controller{ T: h, expectedCalls: newCallSet(), } + if c, ok := isCleanuper(ctrl.T); ok { + c.Cleanup(func() { + ctrl.T.Helper() + ctrl.finish(true, nil) + }) + } + + return ctrl } type cancelReporter struct { - TestHelper + t TestHelper cancel func() } func (r *cancelReporter) Errorf(format string, args ...interface{}) { - r.TestHelper.Errorf(format, args...) + r.t.Errorf(format, args...) } func (r *cancelReporter) Fatalf(format string, args ...interface{}) { defer r.cancel() - r.TestHelper.Fatalf(format, args...) + r.t.Fatalf(format, args...) +} + +func (r *cancelReporter) Helper() { + r.t.Helper() } // WithContext returns a new Controller and a Context, which is cancelled on any @@ -145,15 +165,22 @@ func (r *cancelReporter) Fatalf(format string, args ...interface{}) { func WithContext(ctx context.Context, t TestReporter) (*Controller, context.Context) { h, ok := t.(TestHelper) if !ok { - h = nopTestHelper{t} + h = &nopTestHelper{t: t} } ctx, cancel := context.WithCancel(ctx) - return NewController(&cancelReporter{h, cancel}), ctx + return NewController(&cancelReporter{t: h, cancel: cancel}), ctx } type nopTestHelper struct { - TestReporter + t TestReporter +} + +func (h *nopTestHelper) Errorf(format string, args ...interface{}) { + h.t.Errorf(format, args...) +} +func (h *nopTestHelper) Fatalf(format string, args ...interface{}) { + h.t.Fatalf(format, args...) } func (h nopTestHelper) Helper() {} @@ -197,7 +224,10 @@ func (ctrl *Controller) Call(receiver interface{}, method string, args ...interf expected, err := ctrl.expectedCalls.FindMatch(receiver, method, args) if err != nil { - origin := callerInfo(2) + // callerInfo's skip should be updated if the number of calls between the user's test + // and this line changes, i.e. this code is wrapped in another anonymous function. + // 0 is us, 1 is controller.Call(), 2 is the generated mock, and 3 is the user's test. + origin := callerInfo(3) ctrl.T.Fatalf("Unexpected call to %T.%v(%v) at %s because: %s", receiver, method, args, origin, err) } @@ -229,21 +259,33 @@ func (ctrl *Controller) Call(receiver interface{}, method string, args ...interf // Finish checks to see if all the methods that were expected to be called // were called. It should be invoked for each Controller. It is not idempotent // and therefore can only be invoked once. +// +// New in go1.14+, if you are passing a *testing.T into NewController function you no +// longer need to call ctrl.Finish() in your test methods. func (ctrl *Controller) Finish() { + // If we're currently panicking, probably because this is a deferred call. + // This must be recovered in the deferred function. + err := recover() + ctrl.finish(false, err) +} + +func (ctrl *Controller) finish(cleanup bool, panicErr interface{}) { ctrl.T.Helper() ctrl.mu.Lock() defer ctrl.mu.Unlock() if ctrl.finished { - ctrl.T.Fatalf("Controller.Finish was called more than once. It has to be called exactly once.") + if _, ok := isCleanuper(ctrl.T); !ok { + ctrl.T.Fatalf("Controller.Finish was called more than once. It has to be called exactly once.") + } + return } ctrl.finished = true - // If we're currently panicking, probably because this is a deferred call, - // pass through the panic. - if err := recover(); err != nil { - panic(err) + // Short-circuit, pass through the panic. 
+ if panicErr != nil { + panic(panicErr) } // Check that all remaining expected calls are satisfied. @@ -252,13 +294,43 @@ func (ctrl *Controller) Finish() { ctrl.T.Errorf("missing call(s) to %v", call) } if len(failures) != 0 { - ctrl.T.Fatalf("aborting test due to missing call(s)") + if !cleanup { + ctrl.T.Fatalf("aborting test due to missing call(s)") + return + } + ctrl.T.Errorf("aborting test due to missing call(s)") } } +// callerInfo returns the file:line of the call site. skip is the number +// of stack frames to skip when reporting. 0 is callerInfo's call site. func callerInfo(skip int) string { if _, file, line, ok := runtime.Caller(skip + 1); ok { return fmt.Sprintf("%s:%d", file, line) } return "unknown file" } + +// isCleanuper checks it if t's base TestReporter has a Cleanup method. +func isCleanuper(t TestReporter) (cleanuper, bool) { + tr := unwrapTestReporter(t) + c, ok := tr.(cleanuper) + return c, ok +} + +// unwrapTestReporter unwraps TestReporter to the base implementation. +func unwrapTestReporter(t TestReporter) TestReporter { + tr := t + switch nt := t.(type) { + case *cancelReporter: + tr = nt.t + if h, check := tr.(*nopTestHelper); check { + tr = h.t + } + case *nopTestHelper: + tr = nt.t + default: + // not wrapped + } + return tr +} diff --git a/ecs-agent/vendor/github.com/golang/mock/gomock/matchers.go b/ecs-agent/vendor/github.com/golang/mock/gomock/matchers.go index 7bfc07be43f..2822fb2c8c4 100644 --- a/ecs-agent/vendor/github.com/golang/mock/gomock/matchers.go +++ b/ecs-agent/vendor/github.com/golang/mock/gomock/matchers.go @@ -102,11 +102,25 @@ type eqMatcher struct { } func (e eqMatcher) Matches(x interface{}) bool { - return reflect.DeepEqual(e.x, x) + // In case, some value is nil + if e.x == nil || x == nil { + return reflect.DeepEqual(e.x, x) + } + + // Check if types assignable and convert them to common type + x1Val := reflect.ValueOf(e.x) + x2Val := reflect.ValueOf(x) + + if x1Val.Type().AssignableTo(x2Val.Type()) { + x1ValConverted := x1Val.Convert(x2Val.Type()) + return reflect.DeepEqual(x1ValConverted.Interface(), x2Val.Interface()) + } + + return false } func (e eqMatcher) String() string { - return fmt.Sprintf("is equal to %v", e.x) + return fmt.Sprintf("is equal to %v (%T)", e.x, e.x) } type nilMatcher struct{} @@ -139,7 +153,6 @@ func (n notMatcher) Matches(x interface{}) bool { } func (n notMatcher) String() string { - // TODO: Improve this if we add a NotString method to the Matcher interface. 
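As a rough, standalone sketch of what the new assignability check in `eqMatcher.Matches` enables: the expected value is converted to the argument's type before comparison, so a plain composite literal can now match a value of a defined type with the same underlying type. The `IDs` type below is invented for the example.

```go
package main

import (
	"fmt"

	"github.com/golang/mock/gomock"
)

// IDs is a defined type whose underlying type is []int.
type IDs []int

func main() {
	// []int is assignable to IDs, so Eq converts before calling DeepEqual.
	fmt.Println(gomock.Eq([]int{1, 2}).Matches(IDs{1, 2})) // true

	// Types that are not assignable to one another still do not match.
	fmt.Println(gomock.Eq(int32(1)).Matches(int64(1))) // false
}
```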
return "not(" + n.m.String() + ")" } @@ -194,6 +207,70 @@ func (m lenMatcher) String() string { return fmt.Sprintf("has length %d", m.i) } +type inAnyOrderMatcher struct { + x interface{} +} + +func (m inAnyOrderMatcher) Matches(x interface{}) bool { + given, ok := m.prepareValue(x) + if !ok { + return false + } + wanted, ok := m.prepareValue(m.x) + if !ok { + return false + } + + if given.Len() != wanted.Len() { + return false + } + + usedFromGiven := make([]bool, given.Len()) + foundFromWanted := make([]bool, wanted.Len()) + for i := 0; i < wanted.Len(); i++ { + wantedMatcher := Eq(wanted.Index(i).Interface()) + for j := 0; j < given.Len(); j++ { + if usedFromGiven[j] { + continue + } + if wantedMatcher.Matches(given.Index(j).Interface()) { + foundFromWanted[i] = true + usedFromGiven[j] = true + break + } + } + } + + missingFromWanted := 0 + for _, found := range foundFromWanted { + if !found { + missingFromWanted++ + } + } + extraInGiven := 0 + for _, used := range usedFromGiven { + if !used { + extraInGiven++ + } + } + + return extraInGiven == 0 && missingFromWanted == 0 +} + +func (m inAnyOrderMatcher) prepareValue(x interface{}) (reflect.Value, bool) { + xValue := reflect.ValueOf(x) + switch xValue.Kind() { + case reflect.Slice, reflect.Array: + return xValue, true + default: + return reflect.Value{}, false + } +} + +func (m inAnyOrderMatcher) String() string { + return fmt.Sprintf("has the same elements as %v", m.x) +} + // Constructors // All returns a composite Matcher that returns true if and only all of the @@ -245,7 +322,7 @@ func Not(x interface{}) Matcher { // AssignableToTypeOf(s).Matches(time.Second) // returns true // AssignableToTypeOf(s).Matches(99) // returns false // -// var ctx = reflect.TypeOf((*context.Context)).Elem() +// var ctx = reflect.TypeOf((*context.Context)(nil)).Elem() // AssignableToTypeOf(ctx).Matches(context.Background()) // returns true func AssignableToTypeOf(x interface{}) Matcher { if xt, ok := x.(reflect.Type); ok { @@ -253,3 +330,12 @@ func AssignableToTypeOf(x interface{}) Matcher { } return assignableToTypeOfMatcher{reflect.TypeOf(x)} } + +// InAnyOrder is a Matcher that returns true for collections of the same elements ignoring the order. +// +// Example usage: +// InAnyOrder([]int{1, 2, 3}).Matches([]int{1, 3, 2}) // returns true +// InAnyOrder([]int{1, 2, 3}).Matches([]int{1, 2}) // returns false +func InAnyOrder(x interface{}) Matcher { + return inAnyOrderMatcher{x} +} diff --git a/ecs-agent/vendor/github.com/golang/mock/mockgen/model/model.go b/ecs-agent/vendor/github.com/golang/mock/mockgen/model/model.go index 88783cfdf6e..2c6a62ceb26 100644 --- a/ecs-agent/vendor/github.com/golang/mock/mockgen/model/model.go +++ b/ecs-agent/vendor/github.com/golang/mock/mockgen/model/model.go @@ -71,6 +71,16 @@ func (intf *Interface) addImports(im map[string]bool) { } } +// AddMethod adds a new method, de-duplicating by method name. +func (intf *Interface) AddMethod(m *Method) { + for _, me := range intf.Methods { + if me.Name == m.Name { + return + } + } + intf.Methods = append(intf.Methods, m) +} + // Method is a single method of an interface. type Method struct { Name string @@ -250,11 +260,10 @@ func (mt *MapType) addImports(im map[string]bool) { // NamedType is an exported type in a package. type NamedType struct { Package string // may be empty - Type string // TODO: should this be typed Type? + Type string } func (nt *NamedType) String(pm map[string]string, pkgOverride string) string { - // TODO: is this right? 
if pkgOverride == nt.Package { return nt.Type } @@ -311,7 +320,7 @@ func InterfaceFromInterfaceType(it reflect.Type) (*Interface, error) { return nil, err } - intf.Methods = append(intf.Methods, m) + intf.AddMethod(m) } return intf, nil @@ -468,3 +477,19 @@ func impPath(imp string) string { } return imp } + +// ErrorInterface represent built-in error interface. +var ErrorInterface = Interface{ + Name: "error", + Methods: []*Method{ + { + Name: "Error", + Out: []*Parameter{ + { + Name: "", + Type: PredeclaredType("string"), + }, + }, + }, + }, +} diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/.gitignore b/ecs-agent/vendor/github.com/sirupsen/logrus/.gitignore new file mode 100644 index 00000000000..1fb13abebe7 --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/.gitignore @@ -0,0 +1,4 @@ +logrus +vendor + +.idea/ diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/.golangci.yml b/ecs-agent/vendor/github.com/sirupsen/logrus/.golangci.yml new file mode 100644 index 00000000000..65dc2850377 --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/.golangci.yml @@ -0,0 +1,40 @@ +run: + # do not run on test files yet + tests: false + +# all available settings of specific linters +linters-settings: + errcheck: + # report about not checking of errors in type assetions: `a := b.(MyStruct)`; + # default is false: such cases aren't reported by default. + check-type-assertions: false + + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; + # default is false: such cases aren't reported by default. + check-blank: false + + lll: + line-length: 100 + tab-width: 4 + + prealloc: + simple: false + range-loops: false + for-loops: false + + whitespace: + multi-if: false # Enforces newlines (or comments) after every multi-line if statement + multi-func: false # Enforces newlines (or comments) after every multi-line function signature + +linters: + enable: + - megacheck + - govet + disable: + - maligned + - prealloc + disable-all: false + presets: + - bugs + - unused + fast: false diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/.travis.yml b/ecs-agent/vendor/github.com/sirupsen/logrus/.travis.yml new file mode 100644 index 00000000000..c1dbd5a3a3e --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/.travis.yml @@ -0,0 +1,15 @@ +language: go +go_import_path: github.com/sirupsen/logrus +git: + depth: 1 +env: + - GO111MODULE=on +go: 1.15.x +os: linux +install: + - ./travis/install.sh +script: + - cd ci + - go run mage.go -v -w ../ crossBuild + - go run mage.go -v -w ../ lint + - go run mage.go -v -w ../ test diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/ecs-agent/vendor/github.com/sirupsen/logrus/CHANGELOG.md new file mode 100644 index 00000000000..7567f612898 --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/CHANGELOG.md @@ -0,0 +1,259 @@ +# 1.8.1 +Code quality: + * move magefile in its own subdir/submodule to remove magefile dependency on logrus consumer + * improve timestamp format documentation + +Fixes: + * fix race condition on logger hooks + + +# 1.8.0 + +Correct versioning number replacing v1.7.1. + +# 1.7.1 + +Beware this release has introduced a new public API and its semver is therefore incorrect. 
+ +Code quality: + * use go 1.15 in travis + * use magefile as task runner + +Fixes: + * small fixes about new go 1.13 error formatting system + * Fix for long time race condiction with mutating data hooks + +Features: + * build support for zos + +# 1.7.0 +Fixes: + * the dependency toward a windows terminal library has been removed + +Features: + * a new buffer pool management API has been added + * a set of `Fn()` functions have been added + +# 1.6.0 +Fixes: + * end of line cleanup + * revert the entry concurrency bug fix whic leads to deadlock under some circumstances + * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14 + +Features: + * add an option to the `TextFormatter` to completely disable fields quoting + +# 1.5.0 +Code quality: + * add golangci linter run on travis + +Fixes: + * add mutex for hooks concurrent access on `Entry` data + * caller function field for go1.14 + * fix build issue for gopherjs target + +Feature: + * add an hooks/writer sub-package whose goal is to split output on different stream depending on the trace level + * add a `DisableHTMLEscape` option in the `JSONFormatter` + * add `ForceQuote` and `PadLevelText` options in the `TextFormatter` + +# 1.4.2 + * Fixes build break for plan9, nacl, solaris +# 1.4.1 +This new release introduces: + * Enhance TextFormatter to not print caller information when they are empty (#944) + * Remove dependency on golang.org/x/crypto (#932, #943) + +Fixes: + * Fix Entry.WithContext method to return a copy of the initial entry (#941) + +# 1.4.0 +This new release introduces: + * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848). + * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911) + * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919). + +Fixes: + * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893). + * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903) + * Fix infinite recursion on unknown `Level.String()` (#907) + * Fix race condition in `getCaller` (#916). + + +# 1.3.0 +This new release introduces: + * Log, Logf, Logln functions for Logger and Entry that take a Level + +Fixes: + * Building prometheus node_exporter on AIX (#840) + * Race condition in TextFormatter (#468) + * Travis CI import path (#868) + * Remove coloured output on Windows (#862) + * Pointer to func as field in JSONFormatter (#870) + * Properly marshal Levels (#873) + +# 1.2.0 +This new release introduces: + * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued + * A new trace level named `Trace` whose level is below `Debug` + * A configurable exit function to be called upon a Fatal trace + * The `Level` object now implements `encoding.TextUnmarshaler` interface + +# 1.1.1 +This is a bug fix release. 
+ * fix the build break on Solaris + * don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized + +# 1.1.0 +This new release introduces: + * several fixes: + * a fix for a race condition on entry formatting + * proper cleanup of previously used entries before putting them back in the pool + * the extra new line at the end of message in text formatter has been removed + * a new global public API to check if a level is activated: IsLevelEnabled + * the following methods have been added to the Logger object + * IsLevelEnabled + * SetFormatter + * SetOutput + * ReplaceHooks + * introduction of go module + * an indent configuration for the json formatter + * output colour support for windows + * the field sort function is now configurable for text formatter + * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formater + +# 1.0.6 + +This new release introduces: + * a new api WithTime which allows to easily force the time of the log entry + which is mostly useful for logger wrapper + * a fix reverting the immutability of the entry given as parameter to the hooks + a new configuration field of the json formatter in order to put all the fields + in a nested dictionnary + * a new SetOutput method in the Logger + * a new configuration of the textformatter to configure the name of the default keys + * a new configuration of the text formatter to disable the level truncation + +# 1.0.5 + +* Fix hooks race (#707) +* Fix panic deadlock (#695) + +# 1.0.4 + +* Fix race when adding hooks (#612) +* Fix terminal check in AppEngine (#635) + +# 1.0.3 + +* Replace example files with testable examples + +# 1.0.2 + +* bug: quote non-string values in text formatter (#583) +* Make (*Logger) SetLevel a public method + +# 1.0.1 + +* bug: fix escaping in text formatter (#575) + +# 1.0.0 + +* Officially changed name to lower-case +* bug: colors on Windows 10 (#541) +* bug: fix race in accessing level (#512) + +# 0.11.5 + +* feature: add writer and writerlevel to entry (#372) + +# 0.11.4 + +* bug: fix undefined variable on solaris (#493) + +# 0.11.3 + +* formatter: configure quoting of empty values (#484) +* formatter: configure quoting character (default is `"`) (#484) +* bug: fix not importing io correctly in non-linux environments (#481) + +# 0.11.2 + +* bug: fix windows terminal detection (#476) + +# 0.11.1 + +* bug: fix tty detection with custom out (#471) + +# 0.11.0 + +* performance: Use bufferpool to allocate (#370) +* terminal: terminal detection for app-engine (#343) +* feature: exit handler (#375) + +# 0.10.0 + +* feature: Add a test hook (#180) +* feature: `ParseLevel` is now case-insensitive (#326) +* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) +* performance: avoid re-allocations on `WithFields` (#335) + +# 0.9.0 + +* logrus/text_formatter: don't emit empty msg +* logrus/hooks/airbrake: move out of main repository +* logrus/hooks/sentry: move out of main repository +* logrus/hooks/papertrail: move out of main repository +* logrus/hooks/bugsnag: move out of main repository +* logrus/core: run tests with `-race` +* logrus/core: detect TTY based on `stderr` +* logrus/core: support `WithError` on logger +* logrus/core: Solaris support + +# 0.8.7 + +* logrus/core: fix possible race (#216) +* logrus/doc: small typo fixes and doc improvements + + +# 0.8.6 + +* hooks/raven: allow passing an initialized client + +# 0.8.5 + +* logrus/core: revert #208 + +# 0.8.4 + +* formatter/text: fix data race (#218) + +# 
0.8.3 + +* logrus/core: fix entry log level (#208) +* logrus/core: improve performance of text formatter by 40% +* logrus/core: expose `LevelHooks` type +* logrus/core: add support for DragonflyBSD and NetBSD +* formatter/text: print structs more verbosely + +# 0.8.2 + +* logrus: fix more Fatal family functions + +# 0.8.1 + +* logrus: fix not exiting on `Fatalf` and `Fatalln` + +# 0.8.0 + +* logrus: defaults to stderr instead of stdout +* hooks/sentry: add special field for `*http.Request` +* formatter/text: ignore Windows for colors + +# 0.7.3 + +* formatter/\*: allow configuration of timestamp layout + +# 0.7.2 + +* formatter/text: Add configuration option for time format (#158) diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/LICENSE b/ecs-agent/vendor/github.com/sirupsen/logrus/LICENSE new file mode 100644 index 00000000000..f090cb42f37 --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/README.md b/ecs-agent/vendor/github.com/sirupsen/logrus/README.md new file mode 100644 index 00000000000..b042c896f25 --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/README.md @@ -0,0 +1,513 @@ +# Logrus :walrus: [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus) + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. + +**Logrus is in maintenance-mode.** We will not be introducing new features. It's +simply too hard to do in a way that won't break many people's projects, which is +the last thing you want from your Logging library (again...). + +This does not mean Logrus is dead. Logrus will continue to be maintained for +security, (backwards compatible) bug fixes, and performance (where we are +limited by the interface). + +I believe Logrus' biggest contribution is to have played a part in today's +widespread use of structured logging in Golang. There doesn't seem to be a +reason to do a major, breaking iteration into Logrus V2, since the fantastic Go +community has built those independently. 
Many fantastic alternatives have sprung +up. Logrus would look like those, had it been re-designed with what we know +about structured logging in Go today. Check out, for example, +[Zerolog][zerolog], [Zap][zap], and [Apex][apex]. + +[zerolog]: https://github.com/rs/zerolog +[zap]: https://github.com/uber-go/zap +[apex]: https://github.com/apex/log + +**Seeing weird case-sensitive problems?** It's in the past been possible to +import Logrus as both upper- and lower-case. Due to the Go package environment, +this caused issues in the community and we needed a standard. Some environments +experienced problems with the upper-case variant, so the lower-case was decided. +Everything using `logrus` will need to use the lower-case: +`github.com/sirupsen/logrus`. Any package that isn't, should be changed. + +To fix Glide, see [these +comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). +For an in-depth explanation of the casing issue, see [this +comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276). + +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash +or Splunk: + +```json +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not +attached, the output is compatible with the +[logfmt](http://godoc.org/github.com/kr/logfmt) format: + +```text +time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 +time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 +time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true +time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 +time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 +time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" 
err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true +``` +To ensure this behaviour even if a TTY is attached, set your formatter as follows: + +```go + log.SetFormatter(&log.TextFormatter{ + DisableColors: true, + FullTimestamp: true, + }) +``` + +#### Logging Method Name + +If you wish to add the calling method as a field, instruct the logger via: +```go +log.SetReportCaller(true) +``` +This adds the caller as 'method' like so: + +```json +{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", +"time":"2014-03-10 19:57:38.562543129 -0400 EDT"} +``` + +```text +time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin +``` +Note that this does add measurable overhead - the cost will depend on the version of Go, but is +between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your +environment via benchmarks: +``` +go test -bench=.*CallerTracing +``` + + +#### Case-sensitivity + +The organization's name was changed to lower-case--and this will not be changed +back. If you are getting import conflicts due to case sensitivity, please use +the lower-case import: `github.com/sirupsen/logrus`. + +#### Example + +The simplest way to use Logrus is simply the package-level exported logger: + +```go +package main + +import ( + log "github.com/sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely api-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` +and you'll now have the flexibility of Logrus. You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/sirupsen/logrus" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Output to stdout instead of the default stderr + // Can be any io.Writer, see below for File example + log.SetOutput(os.Stdout) + + // Only log the warning severity or above. + log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") + + // A common pattern is to re-use fields between logging statements by re-using + // the logrus.Entry returned from WithFields() + contextLogger := log.WithFields(log.Fields{ + "common": "this is a common field", + "other": "I also should be logged always", + }) + + contextLogger.Info("I'll be logged with common and other field") + contextLogger.Info("Me too") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "os" + "github.com/sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different than the package level + // exported logger. See Godoc. 
+ log.Out = os.Stdout + + // You could set this to any `io.Writer` such as a file + // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) + // if err == nil { + // log.Out = file + // } else { + // log.Info("Failed to log to file, using default stderr") + // } + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") +} +``` + +#### Fields + +Logrus encourages careful, structured logging through logging fields instead of +long, unparseable error messages. For example, instead of: `log.Fatalf("Failed +to send event %s to topic %s with key %d")`, you should log the much more +discoverable: + +```go +log.WithFields(log.Fields{ + "event": event, + "topic": topic, + "key": key, +}).Fatal("Failed to send event") +``` + +We've found this API forces you to think about logging in a way that produces +much more useful logging messages. We've been in countless situations where just +a single added field to a log statement that was already there would've saved us +hours. The `WithFields` call is optional. + +In general, with Logrus using any of the `printf`-family functions should be +seen as a hint you should add a field, however, you can still use the +`printf`-family functions with Logrus. + +#### Default Fields + +Often it's helpful to have fields _always_ attached to log statements in an +application or parts of one. For example, you may want to always log the +`request_id` and `user_ip` in the context of a request. Instead of writing +`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on +every line, you can create a `logrus.Entry` to pass around instead: + +```go +requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) +requestLogger.Info("something happened on that request") # will log request_id and user_ip +requestLogger.Warn("something not great happened") +``` + +#### Hooks + +You can add hooks for logging levels. For example to send errors to an exception +tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to +multiple places simultaneously, e.g. syslog. + +Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in +`init`: + +```go +import ( + log "github.com/sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" + logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" + "log/syslog" +) + +func init() { + + // Use the Airbrake hook to report errors that have Error severity or above to + // an exception tracker. You can create custom hooks, see the Hooks section. + log.AddHook(airbrake.NewHook(123, "xyz", "production")) + + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + if err != nil { + log.Error("Unable to connect to local syslog daemon") + } else { + log.AddHook(hook) + } +} +``` +Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). + +A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks) + + +#### Level logging + +Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic. 
+ +```go +log.Trace("Something very low level.") +log.Debug("Useful debugging information.") +log.Info("Something noteworthy happened!") +log.Warn("You should probably take a look at this.") +log.Error("Something failed but I'm not quitting.") +// Calls os.Exit(1) after logging +log.Fatal("Bye.") +// Calls panic() after logging +log.Panic("I'm bailing.") +``` + +You can set the logging level on a `Logger`, then it will only log entries with +that severity or anything above it: + +```go +// Will log anything that is info or above (warn, error, fatal, panic). Default. +log.SetLevel(log.InfoLevel) +``` + +It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +environment if your application has that. + +#### Entries + +Besides the fields added with `WithField` or `WithFields` some fields are +automatically added to all logging events: + +1. `time`. The timestamp when the entry was created. +2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after + the `AddFields` call. E.g. `Failed to send event.` +3. `level`. The logging level. E.g. `info`. + +#### Environments + +Logrus has no notion of environment. + +If you wish for hooks and formatters to only be used in specific environments, +you should handle that yourself. For example, if your application has a global +variable `Environment`, which is a string representation of the environment you +could do: + +```go +import ( + log "github.com/sirupsen/logrus" +) + +func init() { + // do something here to set environment depending on an environment variable + // or command-line flag + if Environment == "production" { + log.SetFormatter(&log.JSONFormatter{}) + } else { + // The TextFormatter is default, you don't actually have to do this. + log.SetFormatter(&log.TextFormatter{}) + } +} +``` + +This configuration is how `logrus` was intended to be used, but JSON in +production is mostly only useful if you do log aggregation with tools like +Splunk or Logstash. + +#### Formatters + +The built-in logging formatters are: + +* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise + without colors. + * *Note:* to force colored output when there is no TTY, set the `ForceColors` + field to `true`. To force no colored output even if there is a TTY set the + `DisableColors` field to `true`. For Windows, see + [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). + * When colors are enabled, levels are truncated to 4 characters by default. To disable + truncation set the `DisableLevelTruncation` field to `true`. + * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text. + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). +* `logrus.JSONFormatter`. Logs fields as JSON. + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). + +Third party logging formatters: + +* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. +* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). +* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. 
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. +* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo. +* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure. +* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). get fileName, log's line number and the latest function's name when print log; Sava log to files. +* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). logrus's message json formatter with human-readable caption added. + +You can define your formatter by implementing the `Formatter` interface, +requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a +`Fields` type (`map[string]interface{}`) with all your fields as well as the +default ones (see Entries section above): + +```go +type MyJSONFormatter struct { +} + +log.SetFormatter(new(MyJSONFormatter)) + +func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { + // Note this doesn't include Time, Level and Message which are available on + // the Entry. Consult `godoc` on information about those fields or read the + // source of the official loggers. + serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %w", err) + } + return append(serialized, '\n'), nil +} +``` + +#### Logger as an `io.Writer` + +Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. + +```go +w := logger.Writer() +defer w.Close() + +srv := http.Server{ + // create a stdlib log.Logger that writes to + // logrus.Logger. + ErrorLog: log.New(w, "", 0), +} +``` + +Each line written to that writer will be printed the usual way, using formatters +and hooks. The level for those entries is `info`. + +This means that we can override the standard library logger easily: + +```go +logger := logrus.New() +logger.Formatter = &logrus.JSONFormatter{} + +// Use logrus for standard log output +// Note that `log` here references stdlib's log +// Not logrus imported under the name `log`. +log.SetOutput(logger.Writer()) +``` + +#### Rotation + +Log rotation is not provided with Logrus. Log rotation should be done by an +external program (like `logrotate(8)`) that can compress and delete old log +entries. It should not be a feature of the application-level logger. + +#### Tools + +| Tool | Description | +| ---- | ----------- | +|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will be generated with different configs in different environments.| +|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | + +#### Testing + +Logrus has a built in facility for asserting the presence of log messages. 
This is implemented through the `test` hook and provides: + +* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just adds the `test` hook +* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): + +```go +import( + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestSomething(t*testing.T){ + logger, hook := test.NewNullLogger() + logger.Error("Helloerror") + + assert.Equal(t, 1, len(hook.Entries)) + assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level) + assert.Equal(t, "Helloerror", hook.LastEntry().Message) + + hook.Reset() + assert.Nil(t, hook.LastEntry()) +} +``` + +#### Fatal handlers + +Logrus can register one or more functions that will be called when any `fatal` +level message is logged. The registered handlers will be executed before +logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need +to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. + +``` +... +handler := func() { + // gracefully shutdown something... +} +logrus.RegisterExitHandler(handler) +... +``` + +#### Thread safety + +By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs. +If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. + +Situation when locking is not needed includes: + +* You have no hooks registered, or hooks calling is already thread-safe. + +* Writing to logger.Out is already thread-safe, for example: + + 1) logger.Out is protected by locks. + + 2) logger.Out is an os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing) + + (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/alt_exit.go b/ecs-agent/vendor/github.com/sirupsen/logrus/alt_exit.go new file mode 100644 index 00000000000..8fd189e1cca --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/alt_exit.go @@ -0,0 +1,76 @@ +package logrus + +// The following code was sourced and modified from the +// https://github.com/tebeka/atexit package governed by the following license: +// +// Copyright (c) 2012 Miki Tebeka . +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import ( + "fmt" + "os" +) + +var handlers = []func(){} + +func runHandler(handler func()) { + defer func() { + if err := recover(); err != nil { + fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err) + } + }() + + handler() +} + +func runHandlers() { + for _, handler := range handlers { + runHandler(handler) + } +} + +// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code) +func Exit(code int) { + runHandlers() + os.Exit(code) +} + +// RegisterExitHandler appends a Logrus Exit handler to the list of handlers, +// call logrus.Exit to invoke all handlers. The handlers will also be invoked when +// any Fatal log entry is made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to gracefully shutdown. An example usecase could be +// closing database connections, or sending a alert that the application is +// closing. +func RegisterExitHandler(handler func()) { + handlers = append(handlers, handler) +} + +// DeferExitHandler prepends a Logrus Exit handler to the list of handlers, +// call logrus.Exit to invoke all handlers. The handlers will also be invoked when +// any Fatal log entry is made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to gracefully shutdown. An example usecase could be +// closing database connections, or sending a alert that the application is +// closing. +func DeferExitHandler(handler func()) { + handlers = append([]func(){handler}, handlers...) +} diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/appveyor.yml b/ecs-agent/vendor/github.com/sirupsen/logrus/appveyor.yml new file mode 100644 index 00000000000..df9d65c3a5b --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/appveyor.yml @@ -0,0 +1,14 @@ +version: "{build}" +platform: x64 +clone_folder: c:\gopath\src\github.com\sirupsen\logrus +environment: + GOPATH: c:\gopath +branches: + only: + - master +install: + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version +build_script: + - go get -t + - go test diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/buffer_pool.go b/ecs-agent/vendor/github.com/sirupsen/logrus/buffer_pool.go new file mode 100644 index 00000000000..c7787f77cbf --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/buffer_pool.go @@ -0,0 +1,43 @@ +package logrus + +import ( + "bytes" + "sync" +) + +var ( + bufferPool BufferPool +) + +type BufferPool interface { + Put(*bytes.Buffer) + Get() *bytes.Buffer +} + +type defaultPool struct { + pool *sync.Pool +} + +func (p *defaultPool) Put(buf *bytes.Buffer) { + p.pool.Put(buf) +} + +func (p *defaultPool) Get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +// SetBufferPool allows to replace the default logrus buffer pool +// to better meets the specific needs of an application. 
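A minimal sketch of plugging a custom pool in through `SetBufferPool`, under the assumption that an application wants to stop very large buffers from being recycled; the 64 KiB threshold is arbitrary.

```go
package main

import (
	"bytes"
	"sync"

	"github.com/sirupsen/logrus"
)

// cappedPool recycles formatting buffers but lets oversized ones be
// garbage collected instead of pinning their memory in the pool.
type cappedPool struct {
	pool sync.Pool
}

func (p *cappedPool) Get() *bytes.Buffer {
	return p.pool.Get().(*bytes.Buffer)
}

func (p *cappedPool) Put(buf *bytes.Buffer) {
	if buf.Cap() > 64*1024 {
		return // drop unusually large buffers
	}
	p.pool.Put(buf)
}

func main() {
	logrus.SetBufferPool(&cappedPool{
		pool: sync.Pool{New: func() interface{} { return new(bytes.Buffer) }},
	})
	logrus.Info("logging through the custom buffer pool")
}
```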
+func SetBufferPool(bp BufferPool) { + bufferPool = bp +} + +func init() { + SetBufferPool(&defaultPool{ + pool: &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, + }) +} diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/doc.go b/ecs-agent/vendor/github.com/sirupsen/logrus/doc.go new file mode 100644 index 00000000000..da67aba06de --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/doc.go @@ -0,0 +1,26 @@ +/* +Package logrus is a structured logger for Go, completely API compatible with the standard library logger. + + +The simplest way to use Logrus is simply the package-level exported logger: + + package main + + import ( + log "github.com/sirupsen/logrus" + ) + + func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "number": 1, + "size": 10, + }).Info("A walrus appears") + } + +Output: + time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 + +For a full guide visit https://github.com/sirupsen/logrus +*/ +package logrus diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/entry.go b/ecs-agent/vendor/github.com/sirupsen/logrus/entry.go new file mode 100644 index 00000000000..71cdbbc35d2 --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/entry.go @@ -0,0 +1,442 @@ +package logrus + +import ( + "bytes" + "context" + "fmt" + "os" + "reflect" + "runtime" + "strings" + "sync" + "time" +) + +var ( + + // qualified package name, cached at first use + logrusPackage string + + // Positions in the call stack when tracing to report the calling method + minimumCallerDepth int + + // Used for caller information initialisation + callerInitOnce sync.Once +) + +const ( + maximumCallerDepth int = 25 + knownLogrusFrames int = 4 +) + +func init() { + // start at the bottom of the stack before the package-name cache is primed + minimumCallerDepth = 1 +} + +// Defines the key when adding errors using WithError. +var ErrorKey = "error" + +// An entry is the final or intermediate Logrus logging entry. It contains all +// the fields passed with WithField{,s}. It's finally logged when Trace, Debug, +// Info, Warn, Error, Fatal or Panic is called on it. These objects can be +// reused and passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. + Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic + // This field will be set on entry firing and the value will be equal to the one in Logger struct field. + Level Level + + // Calling method, with package name + Caller *runtime.Frame + + // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic + Message string + + // When formatter is called in entry.log(), a Buffer may be set to entry + Buffer *bytes.Buffer + + // Contains the context set by the user. Useful for hook processing etc. + Context context.Context + + // err may contain a field formatting error + err string +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, plus one optional. Give a little extra room. 
+ Data: make(Fields, 6), + } +} + +func (entry *Entry) Dup() *Entry { + data := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err} +} + +// Returns the bytes representation of this entry from the formatter. +func (entry *Entry) Bytes() ([]byte, error) { + return entry.Logger.Formatter.Format(entry) +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + serialized, err := entry.Bytes() + if err != nil { + return "", err + } + str := string(serialized) + return str, nil +} + +// Add an error as single field (using the key defined in ErrorKey) to the Entry. +func (entry *Entry) WithError(err error) *Entry { + return entry.WithField(ErrorKey, err) +} + +// Add a context to the Entry. +func (entry *Entry) WithContext(ctx context.Context) *Entry { + dataCopy := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + dataCopy[k] = v + } + return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx} +} + +// Add a single field to the Entry. +func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. +func (entry *Entry) WithFields(fields Fields) *Entry { + data := make(Fields, len(entry.Data)+len(fields)) + for k, v := range entry.Data { + data[k] = v + } + fieldErr := entry.err + for k, v := range fields { + isErrField := false + if t := reflect.TypeOf(v); t != nil { + switch { + case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func: + isErrField = true + } + } + if isErrField { + tmp := fmt.Sprintf("can not add field %q", k) + if fieldErr != "" { + fieldErr = entry.err + ", " + tmp + } else { + fieldErr = tmp + } + } else { + data[k] = v + } + } + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context} +} + +// Overrides the time of the Entry. +func (entry *Entry) WithTime(t time.Time) *Entry { + dataCopy := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + dataCopy[k] = v + } + return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context} +} + +// getPackageName reduces a fully qualified function name to the package name +// There really ought to be to be a better way... 
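A small usage sketch of the copy semantics implemented above: `WithField`, `WithFields`, `WithError`, and `WithTime` each build a fresh `*Entry` with a copied `Data` map, so a base entry can be shared and reused without one call site leaking fields into another.

```go
package main

import (
	"errors"
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()
	logger.SetFormatter(&logrus.JSONFormatter{})
	logger.SetOutput(os.Stdout)

	// base carries only request_id; every With* call below returns a new entry.
	base := logrus.NewEntry(logger).WithField("request_id", "abc123")

	base.WithFields(logrus.Fields{"user": "alice"}).Info("login ok")
	base.WithError(errors.New("image not found")).Warn("missing avatar")
	base.Info("still only has request_id") // "user" and "error" do not appear here
}
```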
+func getPackageName(f string) string { + for { + lastPeriod := strings.LastIndex(f, ".") + lastSlash := strings.LastIndex(f, "/") + if lastPeriod > lastSlash { + f = f[:lastPeriod] + } else { + break + } + } + + return f +} + +// getCaller retrieves the name of the first non-logrus calling function +func getCaller() *runtime.Frame { + // cache this package's fully-qualified name + callerInitOnce.Do(func() { + pcs := make([]uintptr, maximumCallerDepth) + _ = runtime.Callers(0, pcs) + + // dynamic get the package name and the minimum caller depth + for i := 0; i < maximumCallerDepth; i++ { + funcName := runtime.FuncForPC(pcs[i]).Name() + if strings.Contains(funcName, "getCaller") { + logrusPackage = getPackageName(funcName) + break + } + } + + minimumCallerDepth = knownLogrusFrames + }) + + // Restrict the lookback frames to avoid runaway lookups + pcs := make([]uintptr, maximumCallerDepth) + depth := runtime.Callers(minimumCallerDepth, pcs) + frames := runtime.CallersFrames(pcs[:depth]) + + for f, again := frames.Next(); again; f, again = frames.Next() { + pkg := getPackageName(f.Function) + + // If the caller isn't part of this package, we're done + if pkg != logrusPackage { + return &f //nolint:scopelint + } + } + + // if we got here, we failed to find the caller's context + return nil +} + +func (entry Entry) HasCaller() (has bool) { + return entry.Logger != nil && + entry.Logger.ReportCaller && + entry.Caller != nil +} + +func (entry *Entry) log(level Level, msg string) { + var buffer *bytes.Buffer + + newEntry := entry.Dup() + + if newEntry.Time.IsZero() { + newEntry.Time = time.Now() + } + + newEntry.Level = level + newEntry.Message = msg + + newEntry.Logger.mu.Lock() + reportCaller := newEntry.Logger.ReportCaller + bufPool := newEntry.getBufferPool() + newEntry.Logger.mu.Unlock() + + if reportCaller { + newEntry.Caller = getCaller() + } + + newEntry.fireHooks() + buffer = bufPool.Get() + defer func() { + newEntry.Buffer = nil + buffer.Reset() + bufPool.Put(buffer) + }() + buffer.Reset() + newEntry.Buffer = buffer + + newEntry.write() + + newEntry.Buffer = nil + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. + if level <= PanicLevel { + panic(newEntry) + } +} + +func (entry *Entry) getBufferPool() (pool BufferPool) { + if entry.Logger.BufferPool != nil { + return entry.Logger.BufferPool + } + return bufferPool +} + +func (entry *Entry) fireHooks() { + var tmpHooks LevelHooks + entry.Logger.mu.Lock() + tmpHooks = make(LevelHooks, len(entry.Logger.Hooks)) + for k, v := range entry.Logger.Hooks { + tmpHooks[k] = v + } + entry.Logger.mu.Unlock() + + err := tmpHooks.Fire(entry.Level, entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + } +} + +func (entry *Entry) write() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + serialized, err := entry.Logger.Formatter.Format(entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + return + } + if _, err := entry.Logger.Out.Write(serialized); err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } +} + +// Log will log a message at the level given as parameter. +// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. +// For this behaviour Entry.Panic or Entry.Fatal should be used instead. 
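A brief sketch of the distinction the comment above draws, using the package's standard logger: `Entry.Log(FatalLevel, ...)` only writes the entry, whereas `Entry.Fatal` also calls `Logger.Exit(1)` and terminates the process.

```go
package main

import "github.com/sirupsen/logrus"

func main() {
	entry := logrus.NewEntry(logrus.StandardLogger())

	// Log never exits the process, even at Fatal level, so execution continues.
	entry.Log(logrus.FatalLevel, "fatal-level message, but we keep running")

	// Fatal logs and then calls Logger.Exit(1); nothing after this line runs.
	entry.Fatal("this one terminates the process")
}
```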
+func (entry *Entry) Log(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.log(level, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Trace(args ...interface{}) { + entry.Log(TraceLevel, args...) +} + +func (entry *Entry) Debug(args ...interface{}) { + entry.Log(DebugLevel, args...) +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) +} + +func (entry *Entry) Info(args ...interface{}) { + entry.Log(InfoLevel, args...) +} + +func (entry *Entry) Warn(args ...interface{}) { + entry.Log(WarnLevel, args...) +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) +} + +func (entry *Entry) Error(args ...interface{}) { + entry.Log(ErrorLevel, args...) +} + +func (entry *Entry) Fatal(args ...interface{}) { + entry.Log(FatalLevel, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + entry.Log(PanicLevel, args...) +} + +// Entry Printf family functions + +func (entry *Entry) Logf(level Level, format string, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Tracef(format string, args ...interface{}) { + entry.Logf(TraceLevel, format, args...) +} + +func (entry *Entry) Debugf(format string, args ...interface{}) { + entry.Logf(DebugLevel, format, args...) +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + entry.Logf(InfoLevel, format, args...) +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + entry.Logf(WarnLevel, format, args...) +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) +} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + entry.Logf(ErrorLevel, format, args...) +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + entry.Logf(FatalLevel, format, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + entry.Logf(PanicLevel, format, args...) +} + +// Entry Println family functions + +func (entry *Entry) Logln(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Traceln(args ...interface{}) { + entry.Logln(TraceLevel, args...) +} + +func (entry *Entry) Debugln(args ...interface{}) { + entry.Logln(DebugLevel, args...) +} + +func (entry *Entry) Infoln(args ...interface{}) { + entry.Logln(InfoLevel, args...) +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + entry.Logln(WarnLevel, args...) +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + entry.Logln(ErrorLevel, args...) +} + +func (entry *Entry) Fatalln(args ...interface{}) { + entry.Logln(FatalLevel, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + entry.Logln(PanicLevel, args...) +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. 
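For clarity on why `sprintlnn` exists, a tiny sketch of the difference between the plain and `ln` forms: the `ln` variants format like `fmt.Sprintln` (spaces between every operand, trailing newline stripped), while the plain variants format like `fmt.Sprint`, which only inserts spaces between non-string operands.

```go
package main

import "github.com/sirupsen/logrus"

func main() {
	logrus.Infoln("answer:", 42) // msg="answer: 42"
	logrus.Info("answer:", 42)   // msg="answer:42"
}
```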
+func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) + return msg[:len(msg)-1] +} diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/exported.go b/ecs-agent/vendor/github.com/sirupsen/logrus/exported.go new file mode 100644 index 00000000000..017c30ce678 --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/exported.go @@ -0,0 +1,270 @@ +package logrus + +import ( + "context" + "io" + "time" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.SetOutput(out) +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.SetFormatter(formatter) +} + +// SetReportCaller sets whether the standard logger will include the calling +// method as a field. +func SetReportCaller(include bool) { + std.SetReportCaller(include) +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.SetLevel(level) +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + return std.GetLevel() +} + +// IsLevelEnabled checks if the log level of the standard logger is greater than the level param +func IsLevelEnabled(level Level) bool { + return std.IsLevelEnabled(level) +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.AddHook(hook) +} + +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. +func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + +// WithContext creates an entry from the standard logger and adds a context to it. +func WithContext(ctx context.Context) *Entry { + return std.WithContext(ctx) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// WithTime creates an entry from the standard logger and overrides the time of +// logs generated with it. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithTime(t time.Time) *Entry { + return std.WithTime(t) +} + +// Trace logs a message at level Trace on the standard logger. +func Trace(args ...interface{}) { + std.Trace(args...) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. 
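A short sketch of guarding expensive log arguments with `IsLevelEnabled`, one of the standard-logger helpers defined above; `expensiveDump` is a stand-in for any costly diagnostic computation.

```go
package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

// expensiveDump stands in for a costly diagnostic computation.
func expensiveDump() string {
	return fmt.Sprintf("%v", []int{1, 2, 3})
}

func main() {
	logrus.SetLevel(logrus.InfoLevel)

	// Skip building the dump entirely when Debug output would be discarded.
	if logrus.IsLevelEnabled(logrus.DebugLevel) {
		logrus.Debug("state dump: ", expensiveDump())
	}
}
```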
+func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// TraceFn logs a message from a func at level Trace on the standard logger. +func TraceFn(fn LogFunction) { + std.TraceFn(fn) +} + +// DebugFn logs a message from a func at level Debug on the standard logger. +func DebugFn(fn LogFunction) { + std.DebugFn(fn) +} + +// PrintFn logs a message from a func at level Info on the standard logger. +func PrintFn(fn LogFunction) { + std.PrintFn(fn) +} + +// InfoFn logs a message from a func at level Info on the standard logger. +func InfoFn(fn LogFunction) { + std.InfoFn(fn) +} + +// WarnFn logs a message from a func at level Warn on the standard logger. +func WarnFn(fn LogFunction) { + std.WarnFn(fn) +} + +// WarningFn logs a message from a func at level Warn on the standard logger. +func WarningFn(fn LogFunction) { + std.WarningFn(fn) +} + +// ErrorFn logs a message from a func at level Error on the standard logger. +func ErrorFn(fn LogFunction) { + std.ErrorFn(fn) +} + +// PanicFn logs a message from a func at level Panic on the standard logger. +func PanicFn(fn LogFunction) { + std.PanicFn(fn) +} + +// FatalFn logs a message from a func at level Fatal on the standard logger then the process will exit with status set to 1. +func FatalFn(fn LogFunction) { + std.FatalFn(fn) +} + +// Tracef logs a message at level Trace on the standard logger. +func Tracef(format string, args ...interface{}) { + std.Tracef(format, args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Traceln logs a message at level Trace on the standard logger. +func Traceln(args ...interface{}) { + std.Traceln(args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) 
+} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) +} diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/formatter.go b/ecs-agent/vendor/github.com/sirupsen/logrus/formatter.go new file mode 100644 index 00000000000..408883773eb --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/formatter.go @@ -0,0 +1,78 @@ +package logrus + +import "time" + +// Default key names for the default fields +const ( + defaultTimestampFormat = time.RFC3339 + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" + FieldKeyLogrusError = "logrus_error" + FieldKeyFunc = "func" + FieldKeyFile = "file" +) + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when +// dumping it. If this code wasn't there doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// Would just silently drop the user provided level. Instead with this code +// it'll logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. +func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) { + timeKey := fieldMap.resolve(FieldKeyTime) + if t, ok := data[timeKey]; ok { + data["fields."+timeKey] = t + delete(data, timeKey) + } + + msgKey := fieldMap.resolve(FieldKeyMsg) + if m, ok := data[msgKey]; ok { + data["fields."+msgKey] = m + delete(data, msgKey) + } + + levelKey := fieldMap.resolve(FieldKeyLevel) + if l, ok := data[levelKey]; ok { + data["fields."+levelKey] = l + delete(data, levelKey) + } + + logrusErrKey := fieldMap.resolve(FieldKeyLogrusError) + if l, ok := data[logrusErrKey]; ok { + data["fields."+logrusErrKey] = l + delete(data, logrusErrKey) + } + + // If reportCaller is not set, 'func' will not conflict. 
+ if reportCaller { + funcKey := fieldMap.resolve(FieldKeyFunc) + if l, ok := data[funcKey]; ok { + data["fields."+funcKey] = l + } + fileKey := fieldMap.resolve(FieldKeyFile) + if l, ok := data[fileKey]; ok { + data["fields."+fileKey] = l + } + } +} diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/hooks.go b/ecs-agent/vendor/github.com/sirupsen/logrus/hooks.go new file mode 100644 index 00000000000..3f151cdc392 --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. +func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/json_formatter.go b/ecs-agent/vendor/github.com/sirupsen/logrus/json_formatter.go new file mode 100644 index 00000000000..c96dc5636bf --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -0,0 +1,128 @@ +package logrus + +import ( + "bytes" + "encoding/json" + "fmt" + "runtime" +) + +type fieldKey string + +// FieldMap allows customization of the key names for default fields. +type FieldMap map[fieldKey]string + +func (f FieldMap) resolve(key fieldKey) string { + if k, ok := f[key]; ok { + return k + } + + return string(key) +} + +// JSONFormatter formats logs into parsable json +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. + // The format to use is the same than for time.Format or time.Parse from the standard + // library. + // The standard Library already provides a set of predefined format. + TimestampFormat string + + // DisableTimestamp allows disabling automatic timestamps in output + DisableTimestamp bool + + // DisableHTMLEscape allows disabling html escaping in output + DisableHTMLEscape bool + + // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. + DataKey string + + // FieldMap allows users to customize the names of keys for default fields. + // As an example: + // formatter := &JSONFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message", + // FieldKeyFunc: "@caller", + // }, + // } + FieldMap FieldMap + + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the json data when ReportCaller is + // activated. If any of the returned value is the empty string the + // corresponding key will be removed from json fields. 
+ CallerPrettyfier func(*runtime.Frame) (function string, file string) + + // PrettyPrint will indent all json logs + PrettyPrint bool +} + +// Format renders a single log entry +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+4) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + + if f.DataKey != "" { + newData := make(Fields, 4) + newData[f.DataKey] = data + data = newData + } + + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + + if entry.err != "" { + data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err + } + if !f.DisableTimestamp { + data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) + } + data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message + data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() + if entry.HasCaller() { + funcVal := entry.Caller.Function + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + if funcVal != "" { + data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal + } + if fileVal != "" { + data[f.FieldMap.resolve(FieldKeyFile)] = fileVal + } + } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + encoder := json.NewEncoder(b) + encoder.SetEscapeHTML(!f.DisableHTMLEscape) + if f.PrettyPrint { + encoder.SetIndent("", " ") + } + if err := encoder.Encode(data); err != nil { + return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err) + } + + return b.Bytes(), nil +} diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/logger.go b/ecs-agent/vendor/github.com/sirupsen/logrus/logger.go new file mode 100644 index 00000000000..5ff0aef6d3f --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/logger.go @@ -0,0 +1,417 @@ +package logrus + +import ( + "context" + "io" + "os" + "sync" + "sync/atomic" + "time" +) + +// LogFunction For big messages, it can be more efficient to pass a function +// and only call it if the log level is actually enables rather than +// generating the log message and then checking if the level is enabled +type LogFunction func() []interface{} + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stderr`. You can also set this to + // something more adventurous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks LevelHooks + // All log entries pass through the formatter before logged to Out. The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. 
+ Formatter Formatter + + // Flag for whether to log caller info (off by default) + ReportCaller bool + + // The logging level the logger should log at. This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. + Level Level + // Used to sync writing to the log. Locking is enabled by Default + mu MutexWrap + // Reusable empty entry + entryPool sync.Pool + // Function to exit the application, defaults to `os.Exit()` + ExitFunc exitFunc + // The buffer pool used to format the log. If it is nil, the default global + // buffer pool will be used. + BufferPool BufferPool +} + +type exitFunc func(int) + +type MutexWrap struct { + lock sync.Mutex + disabled bool +} + +func (mw *MutexWrap) Lock() { + if !mw.disabled { + mw.lock.Lock() + } +} + +func (mw *MutexWrap) Unlock() { + if !mw.disabled { + mw.lock.Unlock() + } +} + +func (mw *MutexWrap) Disable() { + mw.disabled = true +} + +// Creates a new logger. Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: +// +// var log = &logrus.Logger{ +// Out: os.Stderr, +// Formatter: new(logrus.TextFormatter), +// Hooks: make(logrus.LevelHooks), +// Level: logrus.DebugLevel, +// } +// +// It's recommended to make this a global instance called `log`. +func New() *Logger { + return &Logger{ + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + ExitFunc: os.Exit, + ReportCaller: false, + } +} + +func (logger *Logger) newEntry() *Entry { + entry, ok := logger.entryPool.Get().(*Entry) + if ok { + return entry + } + return NewEntry(logger) +} + +func (logger *Logger) releaseEntry(entry *Entry) { + entry.Data = map[string]interface{}{} + logger.entryPool.Put(entry) +} + +// WithField allocates a new entry and adds a field to it. +// Debug, Print, Info, Warn, Error, Fatal or Panic must be then applied to +// this new returned entry. +// If you want multiple fields, use `WithFields`. +func (logger *Logger) WithField(key string, value interface{}) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithFields(fields) +} + +// Add an error as single field to the log entry. All it does is call +// `WithError` for the given `error`. +func (logger *Logger) WithError(err error) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithError(err) +} + +// Add a context to the log entry. +func (logger *Logger) WithContext(ctx context.Context) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithContext(ctx) +} + +// Overrides the time of the log entry. +func (logger *Logger) WithTime(t time.Time) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithTime(t) +} + +func (logger *Logger) Logf(level Level, format string, args ...interface{}) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Logf(level, format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Tracef(format string, args ...interface{}) { + logger.Logf(TraceLevel, format, args...) 
+} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + logger.Logf(DebugLevel, format, args...) +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + logger.Logf(InfoLevel, format, args...) +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + entry := logger.newEntry() + entry.Printf(format, args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + logger.Logf(WarnLevel, format, args...) +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + logger.Warnf(format, args...) +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + logger.Logf(ErrorLevel, format, args...) +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + logger.Logf(FatalLevel, format, args...) + logger.Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + logger.Logf(PanicLevel, format, args...) +} + +// Log will log a message at the level given as parameter. +// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. +// For this behaviour Logger.Panic or Logger.Fatal should be used instead. +func (logger *Logger) Log(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Log(level, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) LogFn(level Level, fn LogFunction) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Log(level, fn()...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Trace(args ...interface{}) { + logger.Log(TraceLevel, args...) +} + +func (logger *Logger) Debug(args ...interface{}) { + logger.Log(DebugLevel, args...) +} + +func (logger *Logger) Info(args ...interface{}) { + logger.Log(InfoLevel, args...) +} + +func (logger *Logger) Print(args ...interface{}) { + entry := logger.newEntry() + entry.Print(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warn(args ...interface{}) { + logger.Log(WarnLevel, args...) +} + +func (logger *Logger) Warning(args ...interface{}) { + logger.Warn(args...) +} + +func (logger *Logger) Error(args ...interface{}) { + logger.Log(ErrorLevel, args...) +} + +func (logger *Logger) Fatal(args ...interface{}) { + logger.Log(FatalLevel, args...) + logger.Exit(1) +} + +func (logger *Logger) Panic(args ...interface{}) { + logger.Log(PanicLevel, args...) +} + +func (logger *Logger) TraceFn(fn LogFunction) { + logger.LogFn(TraceLevel, fn) +} + +func (logger *Logger) DebugFn(fn LogFunction) { + logger.LogFn(DebugLevel, fn) +} + +func (logger *Logger) InfoFn(fn LogFunction) { + logger.LogFn(InfoLevel, fn) +} + +func (logger *Logger) PrintFn(fn LogFunction) { + entry := logger.newEntry() + entry.Print(fn()...) + logger.releaseEntry(entry) +} + +func (logger *Logger) WarnFn(fn LogFunction) { + logger.LogFn(WarnLevel, fn) +} + +func (logger *Logger) WarningFn(fn LogFunction) { + logger.WarnFn(fn) +} + +func (logger *Logger) ErrorFn(fn LogFunction) { + logger.LogFn(ErrorLevel, fn) +} + +func (logger *Logger) FatalFn(fn LogFunction) { + logger.LogFn(FatalLevel, fn) + logger.Exit(1) +} + +func (logger *Logger) PanicFn(fn LogFunction) { + logger.LogFn(PanicLevel, fn) +} + +func (logger *Logger) Logln(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Logln(level, args...) 
+ logger.releaseEntry(entry) + } +} + +func (logger *Logger) Traceln(args ...interface{}) { + logger.Logln(TraceLevel, args...) +} + +func (logger *Logger) Debugln(args ...interface{}) { + logger.Logln(DebugLevel, args...) +} + +func (logger *Logger) Infoln(args ...interface{}) { + logger.Logln(InfoLevel, args...) +} + +func (logger *Logger) Println(args ...interface{}) { + entry := logger.newEntry() + entry.Println(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnln(args ...interface{}) { + logger.Logln(WarnLevel, args...) +} + +func (logger *Logger) Warningln(args ...interface{}) { + logger.Warnln(args...) +} + +func (logger *Logger) Errorln(args ...interface{}) { + logger.Logln(ErrorLevel, args...) +} + +func (logger *Logger) Fatalln(args ...interface{}) { + logger.Logln(FatalLevel, args...) + logger.Exit(1) +} + +func (logger *Logger) Panicln(args ...interface{}) { + logger.Logln(PanicLevel, args...) +} + +func (logger *Logger) Exit(code int) { + runHandlers() + if logger.ExitFunc == nil { + logger.ExitFunc = os.Exit + } + logger.ExitFunc(code) +} + +//When file is opened with appending mode, it's safe to +//write concurrently to a file (within 4k message on Linux). +//In these cases user can choose to disable the lock. +func (logger *Logger) SetNoLock() { + logger.mu.Disable() +} + +func (logger *Logger) level() Level { + return Level(atomic.LoadUint32((*uint32)(&logger.Level))) +} + +// SetLevel sets the logger level. +func (logger *Logger) SetLevel(level Level) { + atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) +} + +// GetLevel returns the logger level. +func (logger *Logger) GetLevel() Level { + return logger.level() +} + +// AddHook adds a hook to the logger hooks. +func (logger *Logger) AddHook(hook Hook) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Hooks.Add(hook) +} + +// IsLevelEnabled checks if the log level of the logger is greater than the level param +func (logger *Logger) IsLevelEnabled(level Level) bool { + return logger.level() >= level +} + +// SetFormatter sets the logger formatter. +func (logger *Logger) SetFormatter(formatter Formatter) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Formatter = formatter +} + +// SetOutput sets the logger output. +func (logger *Logger) SetOutput(output io.Writer) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Out = output +} + +func (logger *Logger) SetReportCaller(reportCaller bool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.ReportCaller = reportCaller +} + +// ReplaceHooks replaces the logger hooks and returns the old ones +func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { + logger.mu.Lock() + oldHooks := logger.Hooks + logger.Hooks = hooks + logger.mu.Unlock() + return oldHooks +} + +// SetBufferPool sets the logger buffer pool. +func (logger *Logger) SetBufferPool(pool BufferPool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.BufferPool = pool +} diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/logrus.go b/ecs-agent/vendor/github.com/sirupsen/logrus/logrus.go new file mode 100644 index 00000000000..2f16224cb9f --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/logrus.go @@ -0,0 +1,186 @@ +package logrus + +import ( + "fmt" + "log" + "strings" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint32 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". 
+func (level Level) String() string { + if b, err := level.MarshalText(); err == nil { + return string(b) + } else { + return "unknown" + } +} + +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch strings.ToLower(lvl) { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + case "trace": + return TraceLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (level *Level) UnmarshalText(text []byte) error { + l, err := ParseLevel(string(text)) + if err != nil { + return err + } + + *level = l + + return nil +} + +func (level Level) MarshalText() ([]byte, error) { + switch level { + case TraceLevel: + return []byte("trace"), nil + case DebugLevel: + return []byte("debug"), nil + case InfoLevel: + return []byte("info"), nil + case WarnLevel: + return []byte("warning"), nil + case ErrorLevel: + return []byte("error"), nil + case FatalLevel: + return []byte("fatal"), nil + case PanicLevel: + return []byte("panic"), nil + } + + return nil, fmt.Errorf("not a valid logrus level %d", level) +} + +// A constant exposing all logging levels +var AllLevels = []Level{ + PanicLevel, + FatalLevel, + ErrorLevel, + WarnLevel, + InfoLevel, + DebugLevel, + TraceLevel, +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. Usually only enabled when debugging. Very verbose logging. + DebugLevel + // TraceLevel level. Designates finer-grained informational events than the Debug. + TraceLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var ( + _ StdLogger = &log.Logger{} + _ StdLogger = &Entry{} + _ StdLogger = &Logger{} +) + +// StdLogger is what your logrus-enabled library should take, that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface, this is the closest we get, unfortunately. 
+type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} + +// The FieldLogger interface generalizes the Entry and Logger types +type FieldLogger interface { + WithField(key string, value interface{}) *Entry + WithFields(fields Fields) *Entry + WithError(err error) *Entry + + Debugf(format string, args ...interface{}) + Infof(format string, args ...interface{}) + Printf(format string, args ...interface{}) + Warnf(format string, args ...interface{}) + Warningf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) + Panicf(format string, args ...interface{}) + + Debug(args ...interface{}) + Info(args ...interface{}) + Print(args ...interface{}) + Warn(args ...interface{}) + Warning(args ...interface{}) + Error(args ...interface{}) + Fatal(args ...interface{}) + Panic(args ...interface{}) + + Debugln(args ...interface{}) + Infoln(args ...interface{}) + Println(args ...interface{}) + Warnln(args ...interface{}) + Warningln(args ...interface{}) + Errorln(args ...interface{}) + Fatalln(args ...interface{}) + Panicln(args ...interface{}) + + // IsDebugEnabled() bool + // IsInfoEnabled() bool + // IsWarnEnabled() bool + // IsErrorEnabled() bool + // IsFatalEnabled() bool + // IsPanicEnabled() bool +} + +// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is +// here for consistancy. Do not use. Use Logger or Entry instead. +type Ext1FieldLogger interface { + FieldLogger + Tracef(format string, args ...interface{}) + Trace(args ...interface{}) + Traceln(args ...interface{}) +} diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go new file mode 100644 index 00000000000..2403de98192 --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go @@ -0,0 +1,11 @@ +// +build appengine + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return true +} diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go new file mode 100644 index 00000000000..499789984d2 --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go @@ -0,0 +1,13 @@ +// +build darwin dragonfly freebsd netbsd openbsd +// +build !js + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TIOCGETA + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_js.go new file mode 100644 index 00000000000..ebdae3ec626 --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_js.go @@ -0,0 +1,7 @@ +// +build js + +package logrus + +func isTerminal(fd int) bool { + return false +} diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go b/ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go new file mode 100644 index 00000000000..97af92c68ea --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go @@ -0,0 +1,11 @@ +// +build js 
nacl plan9 + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return false +} diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go new file mode 100644 index 00000000000..3293fb3caad --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go @@ -0,0 +1,17 @@ +// +build !appengine,!js,!windows,!nacl,!plan9 + +package logrus + +import ( + "io" + "os" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + return isTerminal(int(v.Fd())) + default: + return false + } +} diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go b/ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go new file mode 100644 index 00000000000..f6710b3bd0b --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go @@ -0,0 +1,11 @@ +package logrus + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermio(fd, unix.TCGETA) + return err == nil +} diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_unix.go new file mode 100644 index 00000000000..04748b8515f --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_unix.go @@ -0,0 +1,13 @@ +// +build linux aix zos +// +build !js + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_windows.go new file mode 100644 index 00000000000..2879eb50ea6 --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/terminal_check_windows.go @@ -0,0 +1,27 @@ +// +build !appengine,!js,windows + +package logrus + +import ( + "io" + "os" + + "golang.org/x/sys/windows" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + handle := windows.Handle(v.Fd()) + var mode uint32 + if err := windows.GetConsoleMode(handle, &mode); err != nil { + return false + } + mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + if err := windows.SetConsoleMode(handle, mode); err != nil { + return false + } + return true + } + return false +} diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/text_formatter.go b/ecs-agent/vendor/github.com/sirupsen/logrus/text_formatter.go new file mode 100644 index 00000000000..be2c6efe5ed --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -0,0 +1,339 @@ +package logrus + +import ( + "bytes" + "fmt" + "os" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" +) + +const ( + red = 31 + yellow = 33 + blue = 36 + gray = 37 +) + +var baseTimestamp time.Time + +func init() { + baseTimestamp = time.Now() +} + +// TextFormatter formats logs into text +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Force quoting of all values + ForceQuote bool + + // DisableQuote disables quoting for all values. 
+ // DisableQuote will have a lower priority than ForceQuote. + // If both of them are set to true, quote will be forced on all values. + DisableQuote bool + + // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/ + EnvironmentOverrideColors bool + + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed. + // The format to use is the same than for time.Format or time.Parse from the standard + // library. + // The standard Library already provides a set of predefined format. + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. + DisableSorting bool + + // The keys sorting function, when uninitialized it uses sort.Strings. + SortingFunc func([]string) + + // Disables the truncation of the level text to 4 characters. + DisableLevelTruncation bool + + // PadLevelText Adds padding the level text so that all the levels output at the same length + // PadLevelText is a superset of the DisableLevelTruncation option + PadLevelText bool + + // QuoteEmptyFields will wrap empty fields in quotes if true + QuoteEmptyFields bool + + // Whether the logger's out is to a terminal + isTerminal bool + + // FieldMap allows users to customize the names of keys for default fields. + // As an example: + // formatter := &TextFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message"}} + FieldMap FieldMap + + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the data when ReportCaller is + // activated. If any of the returned value is the empty string the + // corresponding key will be removed from fields. 
+ CallerPrettyfier func(*runtime.Frame) (function string, file string) + + terminalInitOnce sync.Once + + // The max length of the level text, generated dynamically on init + levelTextMaxLength int +} + +func (f *TextFormatter) init(entry *Entry) { + if entry.Logger != nil { + f.isTerminal = checkIfTerminal(entry.Logger.Out) + } + // Get the max length of the level text + for _, level := range AllLevels { + levelTextLength := utf8.RuneCount([]byte(level.String())) + if levelTextLength > f.levelTextMaxLength { + f.levelTextMaxLength = levelTextLength + } + } +} + +func (f *TextFormatter) isColored() bool { + isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows")) + + if f.EnvironmentOverrideColors { + switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); { + case ok && force != "0": + isColored = true + case ok && force == "0", os.Getenv("CLICOLOR") == "0": + isColored = false + } + } + + return isColored && !f.DisableColors +} + +// Format renders a single log entry +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields) + for k, v := range entry.Data { + data[k] = v + } + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) + keys := make([]string, 0, len(data)) + for k := range data { + keys = append(keys, k) + } + + var funcVal, fileVal string + + fixedKeys := make([]string, 0, 4+len(data)) + if !f.DisableTimestamp { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime)) + } + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel)) + if entry.Message != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg)) + } + if entry.err != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError)) + } + if entry.HasCaller() { + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } else { + funcVal = entry.Caller.Function + fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + } + + if funcVal != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc)) + } + if fileVal != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile)) + } + } + + if !f.DisableSorting { + if f.SortingFunc == nil { + sort.Strings(keys) + fixedKeys = append(fixedKeys, keys...) + } else { + if !f.isColored() { + fixedKeys = append(fixedKeys, keys...) + f.SortingFunc(fixedKeys) + } else { + f.SortingFunc(keys) + } + } + } else { + fixedKeys = append(fixedKeys, keys...) 
+ } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + f.terminalInitOnce.Do(func() { f.init(entry) }) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + if f.isColored() { + f.printColored(b, entry, keys, data, timestampFormat) + } else { + + for _, key := range fixedKeys { + var value interface{} + switch { + case key == f.FieldMap.resolve(FieldKeyTime): + value = entry.Time.Format(timestampFormat) + case key == f.FieldMap.resolve(FieldKeyLevel): + value = entry.Level.String() + case key == f.FieldMap.resolve(FieldKeyMsg): + value = entry.Message + case key == f.FieldMap.resolve(FieldKeyLogrusError): + value = entry.err + case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): + value = funcVal + case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): + value = fileVal + default: + value = data[key] + } + f.appendKeyValue(b, key, value) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel, TraceLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + case InfoLevel: + levelColor = blue + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String()) + if !f.DisableLevelTruncation && !f.PadLevelText { + levelText = levelText[0:4] + } + if f.PadLevelText { + // Generates the format string used in the next line, for example "%-6s" or "%-7s". + // Based on the max level text length. + formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s" + // Formats the level text by appending spaces up to the max length, for example: + // - "INFO " + // - "WARNING" + levelText = fmt.Sprintf(formatString, levelText) + } + + // Remove a single newline if it already exists in the message to keep + // the behavior of logrus text_formatter the same as the stdlib log package + entry.Message = strings.TrimSuffix(entry.Message, "\n") + + caller := "" + if entry.HasCaller() { + funcVal := fmt.Sprintf("%s()", entry.Caller.Function) + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + + if fileVal == "" { + caller = funcVal + } else if funcVal == "" { + caller = fileVal + } else { + caller = fileVal + " " + funcVal + } + } + + switch { + case f.DisableTimestamp: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) + case !f.FullTimestamp: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) + default: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) + } + for _, k := range keys { + v := data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) + f.appendValue(b, v) + } +} + +func (f *TextFormatter) needsQuoting(text string) bool { + if f.ForceQuote { + return true + } + if f.QuoteEmptyFields && len(text) == 0 { + return true + } + if f.DisableQuote { + return false + } + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.' 
|| ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { + return true + } + } + return false +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + if b.Len() > 0 { + b.WriteByte(' ') + } + b.WriteString(key) + b.WriteByte('=') + f.appendValue(b, value) +} + +func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { + stringVal, ok := value.(string) + if !ok { + stringVal = fmt.Sprint(value) + } + + if !f.needsQuoting(stringVal) { + b.WriteString(stringVal) + } else { + b.WriteString(fmt.Sprintf("%q", stringVal)) + } +} diff --git a/ecs-agent/vendor/github.com/sirupsen/logrus/writer.go b/ecs-agent/vendor/github.com/sirupsen/logrus/writer.go new file mode 100644 index 00000000000..72e8e3a1b65 --- /dev/null +++ b/ecs-agent/vendor/github.com/sirupsen/logrus/writer.go @@ -0,0 +1,70 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" +) + +// Writer at INFO level. See WriterLevel for details. +func (logger *Logger) Writer() *io.PipeWriter { + return logger.WriterLevel(InfoLevel) +} + +// WriterLevel returns an io.Writer that can be used to write arbitrary text to +// the logger at the given log level. Each line written to the writer will be +// printed in the usual way using formatters and hooks. The writer is part of an +// io.Pipe and it is the callers responsibility to close the writer when done. +// This can be used to override the standard library logger easily. +func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) +} + +func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) +} + +func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + reader, writer := io.Pipe() + + var printFunc func(args ...interface{}) + + switch level { + case TraceLevel: + printFunc = entry.Trace + case DebugLevel: + printFunc = entry.Debug + case InfoLevel: + printFunc = entry.Info + case WarnLevel: + printFunc = entry.Warn + case ErrorLevel: + printFunc = entry.Error + case FatalLevel: + printFunc = entry.Fatal + case PanicLevel: + printFunc = entry.Panic + default: + printFunc = entry.Print + } + + go entry.writerScanner(reader, printFunc) + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + printFunc(scanner.Text()) + } + if err := scanner.Err(); err != nil { + entry.Errorf("Error while reading from Writer: %s", err) + } + reader.Close() +} + +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/ecs-agent/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/ecs-agent/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 41649d26792..95d8e59da69 100644 --- a/ecs-agent/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/ecs-agent/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -1,8 +1,10 @@ package assert import ( + "bytes" "fmt" "reflect" + "time" ) type CompareType int @@ -30,6 +32,9 @@ var ( float64Type = reflect.TypeOf(float64(1)) stringType = reflect.TypeOf("") + + timeType = reflect.TypeOf(time.Time{}) + bytesType = reflect.TypeOf([]byte{}) ) func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { @@ -299,6 +304,47 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { return compareLess, true } } + // Check for known struct types we 
can check for compare results. + case reflect.Struct: + { + // All structs enter here. We're not interested in most types. + if !canConvert(obj1Value, timeType) { + break + } + + // time.Time can compared! + timeObj1, ok := obj1.(time.Time) + if !ok { + timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time) + } + + timeObj2, ok := obj2.(time.Time) + if !ok { + timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time) + } + + return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64) + } + case reflect.Slice: + { + // We only care about the []byte type. + if !canConvert(obj1Value, bytesType) { + break + } + + // []byte can be compared! + bytesObj1, ok := obj1.([]byte) + if !ok { + bytesObj1 = obj1Value.Convert(bytesType).Interface().([]byte) + + } + bytesObj2, ok := obj2.([]byte) + if !ok { + bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte) + } + + return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true + } } return compareEqual, false @@ -310,7 +356,10 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { // assert.Greater(t, float64(2), float64(1)) // assert.Greater(t, "b", "a") func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -320,7 +369,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface // assert.GreaterOrEqual(t, "b", "a") // assert.GreaterOrEqual(t, "b", "b") func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // Less asserts that the first element is less than the second @@ -329,7 +381,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in // assert.Less(t, float64(1), float64(2)) // assert.Less(t, "a", "b") func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // LessOrEqual asserts that the first element is less than or equal to the second @@ -339,7 +394,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) // assert.LessOrEqual(t, "a", "b") // assert.LessOrEqual(t, "b", "b") func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) 
} // Positive asserts that the specified element is positive @@ -347,8 +405,11 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter // assert.Positive(t, 1) // assert.Positive(t, 1.23) func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...) } // Negative asserts that the specified element is negative @@ -356,8 +417,11 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { // assert.Negative(t, -1) // assert.Negative(t, -1.23) func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...) } func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { diff --git a/ecs-agent/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/ecs-agent/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go new file mode 100644 index 00000000000..da867903e2f --- /dev/null +++ b/ecs-agent/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go @@ -0,0 +1,16 @@ +//go:build go1.17 +// +build go1.17 + +// TODO: once support for Go 1.16 is dropped, this file can be +// merged/removed with assertion_compare_go1.17_test.go and +// assertion_compare_legacy.go + +package assert + +import "reflect" + +// Wrapper around reflect.Value.CanConvert, for compatibility +// reasons. +func canConvert(value reflect.Value, to reflect.Type) bool { + return value.CanConvert(to) +} diff --git a/ecs-agent/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/ecs-agent/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go new file mode 100644 index 00000000000..1701af2a3c8 --- /dev/null +++ b/ecs-agent/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go @@ -0,0 +1,16 @@ +//go:build !go1.17 +// +build !go1.17 + +// TODO: once support for Go 1.16 is dropped, this file can be +// merged/removed with assertion_compare_go1.17_test.go and +// assertion_compare_can_convert.go + +package assert + +import "reflect" + +// Older versions of Go does not have the reflect.Value.CanConvert +// method. +func canConvert(value reflect.Value, to reflect.Type) bool { + return false +} diff --git a/ecs-agent/vendor/github.com/stretchr/testify/assert/assertion_format.go b/ecs-agent/vendor/github.com/stretchr/testify/assert/assertion_format.go index 4dfd1229a86..7880b8f9433 100644 --- a/ecs-agent/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/ecs-agent/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -123,6 +123,18 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...) } +// ErrorContainsf asserts that a function returned an error (i.e. 
not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return ErrorContains(t, theError, contains, append([]interface{}{msg}, args...)...) +} + // ErrorIsf asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { @@ -724,6 +736,16 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...) } +// WithinRangef asserts that a time is within a time range (inclusive). +// +// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return WithinRange(t, actual, start, end, append([]interface{}{msg}, args...)...) +} + // YAMLEqf asserts that two YAML strings are equivalent. func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { diff --git a/ecs-agent/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/ecs-agent/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 25337a6f07e..339515b8bfb 100644 --- a/ecs-agent/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/ecs-agent/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -222,6 +222,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. return ErrorAsf(a.t, err, target, msg, args...) } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContains(err, expectedErrorSubString) +func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorContains(a.t, theError, contains, msgAndArgs...) +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") +func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorContainsf(a.t, theError, contains, msg, args...) +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool { @@ -1437,6 +1461,26 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta return WithinDurationf(a.t, expected, actual, delta, msg, args...) } +// WithinRange asserts that a time is within a time range (inclusive). 
+// +// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return WithinRange(a.t, actual, start, end, msgAndArgs...) +} + +// WithinRangef asserts that a time is within a time range (inclusive). +// +// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return WithinRangef(a.t, actual, start, end, msg, args...) +} + // YAMLEq asserts that two YAML strings are equivalent. func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { diff --git a/ecs-agent/vendor/github.com/stretchr/testify/assert/assertion_order.go b/ecs-agent/vendor/github.com/stretchr/testify/assert/assertion_order.go index 1c3b47182a7..75944878358 100644 --- a/ecs-agent/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/ecs-agent/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT // assert.IsIncreasing(t, []float{1, 2}) // assert.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // IsNonIncreasing asserts that the collection is not increasing @@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonIncreasing(t, []float{2, 1}) // assert.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // IsDecreasing asserts that the collection is decreasing @@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // assert.IsDecreasing(t, []float{2, 1}) // assert.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) 
} // IsNonDecreasing asserts that the collection is not decreasing @@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonDecreasing(t, []float{1, 2}) // assert.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } diff --git a/ecs-agent/vendor/github.com/stretchr/testify/assert/assertions.go b/ecs-agent/vendor/github.com/stretchr/testify/assert/assertions.go index bcac4401f57..fa1245b1897 100644 --- a/ecs-agent/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/ecs-agent/vendor/github.com/stretchr/testify/assert/assertions.go @@ -8,6 +8,7 @@ import ( "fmt" "math" "os" + "path/filepath" "reflect" "regexp" "runtime" @@ -144,7 +145,8 @@ func CallerInfo() []string { if len(parts) > 1 { dir := parts[len(parts)-2] if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + path, _ := filepath.Abs(file) + callers = append(callers, fmt.Sprintf("%s:%d", path, line)) } } @@ -563,16 +565,17 @@ func isEmpty(object interface{}) bool { switch objValue.Kind() { // collection types are empty when they have no element - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + case reflect.Chan, reflect.Map, reflect.Slice: return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty + // pointers are empty if nil or if the value they point to is empty case reflect.Ptr: if objValue.IsNil() { return true } deref := objValue.Elem().Interface() return isEmpty(deref) - // for all other types, compare against the zero value + // for all other types, compare against the zero value + // array types are empty when they match their zero-initialized state default: zero := reflect.Zero(objValue.Type()) return reflect.DeepEqual(object, zero.Interface()) @@ -718,10 +721,14 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte // return (false, false) if impossible. // return (true, false) if element was not found. // return (true, true) if element was found. -func includeElement(list interface{}, element interface{}) (ok, found bool) { +func containsElement(list interface{}, element interface{}) (ok, found bool) { listValue := reflect.ValueOf(list) - listKind := reflect.TypeOf(list).Kind() + listType := reflect.TypeOf(list) + if listType == nil { + return false, false + } + listKind := listType.Kind() defer func() { if e := recover(); e != nil { ok = false @@ -764,7 +771,7 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo h.Helper() } - ok, found := includeElement(s, contains) + ok, found := containsElement(s, contains) if !ok { return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...) } @@ -787,7 +794,7 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) h.Helper() } - ok, found := includeElement(s, contains) + ok, found := containsElement(s, contains) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) 
} @@ -811,7 +818,6 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true // we consider nil to be equal to the nil set } - subsetValue := reflect.ValueOf(subset) defer func() { if e := recover(); e != nil { ok = false @@ -821,17 +827,35 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok listKind := reflect.TypeOf(list).Kind() subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice { + if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } - if subsetKind != reflect.Array && subsetKind != reflect.Slice { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } + subsetValue := reflect.ValueOf(subset) + if subsetKind == reflect.Map && listKind == reflect.Map { + listValue := reflect.ValueOf(list) + subsetKeys := subsetValue.MapKeys() + + for i := 0; i < len(subsetKeys); i++ { + subsetKey := subsetKeys[i] + subsetElement := subsetValue.MapIndex(subsetKey).Interface() + listElement := listValue.MapIndex(subsetKey).Interface() + + if !ObjectsAreEqual(subsetElement, listElement) { + return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...) + } + } + + return true + } + for i := 0; i < subsetValue.Len(); i++ { element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) + ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) } @@ -852,10 +876,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) h.Helper() } if subset == nil { - return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) + return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...) } - subsetValue := reflect.ValueOf(subset) defer func() { if e := recover(); e != nil { ok = false @@ -865,17 +888,35 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) listKind := reflect.TypeOf(list).Kind() subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice { + if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } - if subsetKind != reflect.Array && subsetKind != reflect.Slice { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } + subsetValue := reflect.ValueOf(subset) + if subsetKind == reflect.Map && listKind == reflect.Map { + listValue := reflect.ValueOf(list) + subsetKeys := subsetValue.MapKeys() + + for i := 0; i < len(subsetKeys); i++ { + subsetKey := subsetKeys[i] + subsetElement := subsetValue.MapIndex(subsetKey).Interface() + listElement := listValue.MapIndex(subsetKey).Interface() + + if !ObjectsAreEqual(subsetElement, listElement) { + return true + } + } + + return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) 
+ } + for i := 0; i < subsetValue.Len(); i++ { element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) + ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) } @@ -1000,27 +1041,21 @@ func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { type PanicTestFunc func() // didPanic returns true if the function passed to it panics. Otherwise, it returns false. -func didPanic(f PanicTestFunc) (bool, interface{}, string) { - - didPanic := false - var message interface{} - var stack string - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - stack = string(debug.Stack()) - } - }() - - // call the target function - f() +func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string) { + didPanic = true + defer func() { + message = recover() + if didPanic { + stack = string(debug.Stack()) + } }() - return didPanic, message, stack + // call the target function + f() + didPanic = false + return } // Panics asserts that the code inside the specified PanicTestFunc panics. @@ -1111,6 +1146,27 @@ func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, return true } +// WithinRange asserts that a time is within a time range (inclusive). +// +// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func WithinRange(t TestingT, actual, start, end time.Time, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if end.Before(start) { + return Fail(t, "Start should be before end", msgAndArgs...) + } + + if actual.Before(start) { + return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is before the range", actual, start, end), msgAndArgs...) + } else if actual.After(end) { + return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is after the range", actual, start, end), msgAndArgs...) + } + + return true +} + func toFloat(x interface{}) (float64, bool) { var xf float64 xok := true @@ -1161,11 +1217,15 @@ func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs bf, bok := toFloat(actual) if !aok || !bok { - return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) + return Fail(t, "Parameters must be numerical", msgAndArgs...) + } + + if math.IsNaN(af) && math.IsNaN(bf) { + return true } if math.IsNaN(af) { - return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) + return Fail(t, "Expected must not be NaN", msgAndArgs...) } if math.IsNaN(bf) { @@ -1188,7 +1248,7 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + return Fail(t, "Parameters must be slice", msgAndArgs...) 
} actualSlice := reflect.ValueOf(actual) @@ -1250,8 +1310,12 @@ func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, m func calcRelativeError(expected, actual interface{}) (float64, error) { af, aok := toFloat(expected) - if !aok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) + bf, bok := toFloat(actual) + if !aok || !bok { + return 0, fmt.Errorf("Parameters must be numerical") + } + if math.IsNaN(af) && math.IsNaN(bf) { + return 0, nil } if math.IsNaN(af) { return 0, errors.New("expected value must not be NaN") @@ -1259,10 +1323,6 @@ func calcRelativeError(expected, actual interface{}) (float64, error) { if af == 0 { return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") } - bf, bok := toFloat(actual) - if !bok { - return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) - } if math.IsNaN(bf) { return 0, errors.New("actual value must not be NaN") } @@ -1298,7 +1358,7 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + return Fail(t, "Parameters must be slice", msgAndArgs...) } actualSlice := reflect.ValueOf(actual) @@ -1375,6 +1435,27 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte return true } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContains(t, err, expectedErrorSubString) +func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !Error(t, theError, msgAndArgs...) { + return false + } + + actual := theError.Error() + if !strings.Contains(actual, contains) { + return Fail(t, fmt.Sprintf("Error %#v does not contain %#v", actual, contains), msgAndArgs...) + } + + return true +} + // matchRegexp return true if a specified regexp matches a string. 
func matchRegexp(rx interface{}, str interface{}) bool { @@ -1588,12 +1669,17 @@ func diff(expected interface{}, actual interface{}) string { } var e, a string - if et != reflect.TypeOf("") { - e = spewConfig.Sdump(expected) - a = spewConfig.Sdump(actual) - } else { + + switch et { + case reflect.TypeOf(""): e = reflect.ValueOf(expected).String() a = reflect.ValueOf(actual).String() + case reflect.TypeOf(time.Time{}): + e = spewConfigStringerEnabled.Sdump(expected) + a = spewConfigStringerEnabled.Sdump(actual) + default: + e = spewConfig.Sdump(expected) + a = spewConfig.Sdump(actual) } diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ @@ -1625,6 +1711,14 @@ var spewConfig = spew.ConfigState{ MaxDepth: 10, } +var spewConfigStringerEnabled = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, + MaxDepth: 10, +} + type tHelper interface { Helper() } diff --git a/ecs-agent/vendor/github.com/stretchr/testify/require/require.go b/ecs-agent/vendor/github.com/stretchr/testify/require/require.go index 51820df2e67..880853f5a2c 100644 --- a/ecs-agent/vendor/github.com/stretchr/testify/require/require.go +++ b/ecs-agent/vendor/github.com/stretchr/testify/require/require.go @@ -280,6 +280,36 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int t.FailNow() } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContains(t, err, expectedErrorSubString) +func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorContains(t, theError, contains, msgAndArgs...) { + return + } + t.FailNow() +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorContainsf(t, theError, contains, msg, args...) { + return + } + t.FailNow() +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func ErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) { @@ -1834,6 +1864,32 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim t.FailNow() } +// WithinRange asserts that a time is within a time range (inclusive). +// +// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.WithinRange(t, actual, start, end, msgAndArgs...) { + return + } + t.FailNow() +} + +// WithinRangef asserts that a time is within a time range (inclusive). +// +// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.WithinRangef(t, actual, start, end, msg, args...) 
{ + return + } + t.FailNow() +} + // YAMLEq asserts that two YAML strings are equivalent. func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { diff --git a/ecs-agent/vendor/github.com/stretchr/testify/require/require_forward.go b/ecs-agent/vendor/github.com/stretchr/testify/require/require_forward.go index ed54a9d83f3..960bf6f2cab 100644 --- a/ecs-agent/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/ecs-agent/vendor/github.com/stretchr/testify/require/require_forward.go @@ -223,6 +223,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. ErrorAsf(a.t, err, target, msg, args...) } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContains(err, expectedErrorSubString) +func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorContains(a.t, theError, contains, msgAndArgs...) +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") +func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorContainsf(a.t, theError, contains, msg, args...) +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) { @@ -1438,6 +1462,26 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta WithinDurationf(a.t, expected, actual, delta, msg, args...) } +// WithinRange asserts that a time is within a time range (inclusive). +// +// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + WithinRange(a.t, actual, start, end, msgAndArgs...) +} + +// WithinRangef asserts that a time is within a time range (inclusive). +// +// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + WithinRangef(a.t, actual, start, end, msg, args...) +} + // YAMLEq asserts that two YAML strings are equivalent. 
func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { diff --git a/ecs-agent/vendor/go.opencensus.io/.gitignore b/ecs-agent/vendor/go.opencensus.io/.gitignore new file mode 100644 index 00000000000..74a6db472e5 --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/.gitignore @@ -0,0 +1,9 @@ +/.idea/ + +# go.opencensus.io/exporter/aws +/exporter/aws/ + +# Exclude vendor, use dep ensure after checkout: +/vendor/github.com/ +/vendor/golang.org/ +/vendor/google.golang.org/ diff --git a/ecs-agent/vendor/go.opencensus.io/AUTHORS b/ecs-agent/vendor/go.opencensus.io/AUTHORS new file mode 100644 index 00000000000..e491a9e7f78 --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/ecs-agent/vendor/go.opencensus.io/CONTRIBUTING.md b/ecs-agent/vendor/go.opencensus.io/CONTRIBUTING.md new file mode 100644 index 00000000000..1ba3962c8bf --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/CONTRIBUTING.md @@ -0,0 +1,63 @@ +# How to contribute + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement. You (or your employer) retain the copyright to your contribution, +this simply gives us permission to use and redistribute your contributions as +part of the project. Head over to to see +your current agreements on file or to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult [GitHub Help] for more +information on using pull requests. + +[GitHub Help]: https://help.github.com/articles/about-pull-requests/ + +## Instructions + +Fork the repo, checkout the upstream repo to your GOPATH by: + +``` +$ go get -d go.opencensus.io +``` + +Add your fork as an origin: + +``` +cd $(go env GOPATH)/src/go.opencensus.io +git remote add fork git@github.com:YOUR_GITHUB_USERNAME/opencensus-go.git +``` + +Run tests: + +``` +$ make install-tools # Only first time. +$ make +``` + +Checkout a new branch, make modifications and push the branch to your fork: + +``` +$ git checkout -b feature +# edit files +$ git commit +$ git push fork feature +``` + +Open a pull request against the main opencensus-go repo. + +## General Notes +This project uses Appveyor and Travis for CI. + +The dependencies are managed with `go mod` if you work with the sources under your +`$GOPATH` you need to set the environment variable `GO111MODULE=on`. \ No newline at end of file diff --git a/ecs-agent/vendor/go.opencensus.io/LICENSE b/ecs-agent/vendor/go.opencensus.io/LICENSE new file mode 100644 index 00000000000..7a4a3ea2424 --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file diff --git a/ecs-agent/vendor/go.opencensus.io/Makefile b/ecs-agent/vendor/go.opencensus.io/Makefile new file mode 100644 index 00000000000..d896edc9968 --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/Makefile @@ -0,0 +1,97 @@ +# TODO: Fix this on windows. +ALL_SRC := $(shell find . -name '*.go' \ + -not -path './vendor/*' \ + -not -path '*/gen-go/*' \ + -type f | sort) +ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC)))) + +GOTEST_OPT?=-v -race -timeout 30s +GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=atomic +GOTEST=go test +GOIMPORTS=goimports +GOLINT=golint +GOVET=go vet +EMBEDMD=embedmd +# TODO decide if we need to change these names. +TRACE_ID_LINT_EXCEPTION="type name will be used as trace.TraceID by other packages" +TRACE_OPTION_LINT_EXCEPTION="type name will be used as trace.TraceOptions by other packages" +README_FILES := $(shell find . -name '*README.md' | sort | tr '\n' ' ') + +.DEFAULT_GOAL := imports-lint-vet-embedmd-test + +.PHONY: imports-lint-vet-embedmd-test +imports-lint-vet-embedmd-test: imports lint vet embedmd test + +# TODO enable test-with-coverage in tavis +.PHONY: travis-ci +travis-ci: imports lint vet embedmd test test-386 + +all-pkgs: + @echo $(ALL_PKGS) | tr ' ' '\n' | sort + +all-srcs: + @echo $(ALL_SRC) | tr ' ' '\n' | sort + +.PHONY: test +test: + $(GOTEST) $(GOTEST_OPT) $(ALL_PKGS) + +.PHONY: test-386 +test-386: + GOARCH=386 $(GOTEST) -v -timeout 30s $(ALL_PKGS) + +.PHONY: test-with-coverage +test-with-coverage: + $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS) + +.PHONY: imports +imports: + @IMPORTSOUT=`$(GOIMPORTS) -l $(ALL_SRC) 2>&1`; \ + if [ "$$IMPORTSOUT" ]; then \ + echo "$(GOIMPORTS) FAILED => goimports the following files:\n"; \ + echo "$$IMPORTSOUT\n"; \ + exit 1; \ + else \ + echo "Imports finished successfully"; \ + fi + +.PHONY: lint +lint: + @LINTOUT=`$(GOLINT) $(ALL_PKGS) | grep -v $(TRACE_ID_LINT_EXCEPTION) | grep -v $(TRACE_OPTION_LINT_EXCEPTION) 2>&1`; \ + if [ "$$LINTOUT" ]; then \ + echo "$(GOLINT) FAILED => clean the following lint errors:\n"; \ + echo "$$LINTOUT\n"; \ + exit 1; \ + else \ + echo "Lint finished successfully"; \ + fi + +.PHONY: vet +vet: + # TODO: Understand why go vet downloads "github.com/google/go-cmp v0.2.0" + @VETOUT=`$(GOVET) ./... 
| grep -v "go: downloading" 2>&1`; \ + if [ "$$VETOUT" ]; then \ + echo "$(GOVET) FAILED => go vet the following files:\n"; \ + echo "$$VETOUT\n"; \ + exit 1; \ + else \ + echo "Vet finished successfully"; \ + fi + +.PHONY: embedmd +embedmd: + @EMBEDMDOUT=`$(EMBEDMD) -d $(README_FILES) 2>&1`; \ + if [ "$$EMBEDMDOUT" ]; then \ + echo "$(EMBEDMD) FAILED => embedmd the following files:\n"; \ + echo "$$EMBEDMDOUT\n"; \ + exit 1; \ + else \ + echo "Embedmd finished successfully"; \ + fi + +.PHONY: install-tools +install-tools: + go install golang.org/x/lint/golint@latest + go install golang.org/x/tools/cmd/cover@latest + go install golang.org/x/tools/cmd/goimports@latest + go install github.com/rakyll/embedmd@latest diff --git a/ecs-agent/vendor/go.opencensus.io/README.md b/ecs-agent/vendor/go.opencensus.io/README.md new file mode 100644 index 00000000000..1d7e837116f --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/README.md @@ -0,0 +1,267 @@ +# OpenCensus Libraries for Go + +[![Build Status][travis-image]][travis-url] +[![Windows Build Status][appveyor-image]][appveyor-url] +[![GoDoc][godoc-image]][godoc-url] +[![Gitter chat][gitter-image]][gitter-url] + +OpenCensus Go is a Go implementation of OpenCensus, a toolkit for +collecting application performance and behavior monitoring data. +Currently it consists of three major components: tags, stats and tracing. + +#### OpenCensus and OpenTracing have merged to form OpenTelemetry, which serves as the next major version of OpenCensus and OpenTracing. OpenTelemetry will offer backwards compatibility with existing OpenCensus integrations, and we will continue to make security patches to existing OpenCensus libraries for two years. Read more about the merger [here](https://medium.com/opentracing/a-roadmap-to-convergence-b074e5815289). + +## Installation + +``` +$ go get -u go.opencensus.io +``` + +The API of this project is still evolving, see: [Deprecation Policy](#deprecation-policy). +The use of vendoring or a dependency management tool is recommended. + +## Prerequisites + +OpenCensus Go libraries require Go 1.8 or later. + +## Getting Started + +The easiest way to get started using OpenCensus in your application is to use an existing +integration with your RPC framework: + +* [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp) +* [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc) +* [database/sql](https://godoc.org/github.com/opencensus-integrations/ocsql) +* [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus) +* [Groupcache](https://godoc.org/github.com/orijtech/groupcache) +* [Caddy webserver](https://godoc.org/github.com/orijtech/caddy) +* [MongoDB](https://godoc.org/github.com/orijtech/mongo-go-driver) +* [Redis gomodule/redigo](https://godoc.org/github.com/orijtech/redigo) +* [Redis goredis/redis](https://godoc.org/github.com/orijtech/redis) +* [Memcache](https://godoc.org/github.com/orijtech/gomemcache) + +If you're using a framework not listed here, you could either implement your own middleware for your +framework or use [custom stats](#stats) and [spans](#spans) directly in your application. + +## Exporters + +OpenCensus can export instrumentation data to various backends. 
+OpenCensus has exporter implementations for the following, users +can implement their own exporters by implementing the exporter interfaces +([stats](https://godoc.org/go.opencensus.io/stats/view#Exporter), +[trace](https://godoc.org/go.opencensus.io/trace#Exporter)): + +* [Prometheus][exporter-prom] for stats +* [OpenZipkin][exporter-zipkin] for traces +* [Stackdriver][exporter-stackdriver] Monitoring for stats and Trace for traces +* [Jaeger][exporter-jaeger] for traces +* [AWS X-Ray][exporter-xray] for traces +* [Datadog][exporter-datadog] for stats and traces +* [Graphite][exporter-graphite] for stats +* [Honeycomb][exporter-honeycomb] for traces +* [New Relic][exporter-newrelic] for stats and traces + +## Overview + +![OpenCensus Overview](https://i.imgur.com/cf4ElHE.jpg) + +In a microservices environment, a user request may go through +multiple services until there is a response. OpenCensus allows +you to instrument your services and collect diagnostics data all +through your services end-to-end. + +## Tags + +Tags represent propagated key-value pairs. They are propagated using `context.Context` +in the same process or can be encoded to be transmitted on the wire. Usually, this will +be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler` +for gRPC. + +Package `tag` allows adding or modifying tags in the current context. + +[embedmd]:# (internal/readme/tags.go new) +```go +ctx, err := tag.New(ctx, + tag.Insert(osKey, "macOS-10.12.5"), + tag.Upsert(userIDKey, "cde36753ed"), +) +if err != nil { + log.Fatal(err) +} +``` + +## Stats + +OpenCensus is a low-overhead framework even if instrumentation is always enabled. +In order to be so, it is optimized to make recording of data points fast +and separate from the data aggregation. + +OpenCensus stats collection happens in two stages: + +* Definition of measures and recording of data points +* Definition of views and aggregation of the recorded data + +### Recording + +Measurements are data points associated with a measure. +Recording implicitly tags the set of Measurements with the tags from the +provided context: + +[embedmd]:# (internal/readme/stats.go record) +```go +stats.Record(ctx, videoSize.M(102478)) +``` + +### Views + +Views are how Measures are aggregated. You can think of them as queries over the +set of recorded data points (measurements). + +Views have two parts: the tags to group by and the aggregation type used. + +Currently three types of aggregations are supported: +* CountAggregation is used to count the number of times a sample was recorded. +* DistributionAggregation is used to provide a histogram of the values of the samples. +* SumAggregation is used to sum up all sample values. + +[embedmd]:# (internal/readme/stats.go aggs) +```go +distAgg := view.Distribution(1<<32, 2<<32, 3<<32) +countAgg := view.Count() +sumAgg := view.Sum() +``` + +Here we create a view with the DistributionAggregation over our measure. + +[embedmd]:# (internal/readme/stats.go view) +```go +if err := view.Register(&view.View{ + Name: "example.com/video_size_distribution", + Description: "distribution of processed video size over time", + Measure: videoSize, + Aggregation: view.Distribution(1<<32, 2<<32, 3<<32), +}); err != nil { + log.Fatalf("Failed to register view: %v", err) +} +``` + +Register begins collecting data for the view. Registered views' data will be +exported via the registered exporters. 
+ +## Traces + +A distributed trace tracks the progression of a single user request as +it is handled by the services and processes that make up an application. +Each step is called a span in the trace. Spans include metadata about the step, +including especially the time spent in the step, called the span’s latency. + +Below you see a trace and several spans underneath it. + +![Traces and spans](https://i.imgur.com/7hZwRVj.png) + +### Spans + +Span is the unit step in a trace. Each span has a name, latency, status and +additional metadata. + +Below we are starting a span for a cache read and ending it +when we are done: + +[embedmd]:# (internal/readme/trace.go startend) +```go +ctx, span := trace.StartSpan(ctx, "cache.Get") +defer span.End() + +// Do work to get from cache. +``` + +### Propagation + +Spans can have parents or can be root spans if they don't have any parents. +The current span is propagated in-process and across the network to allow associating +new child spans with the parent. + +In the same process, `context.Context` is used to propagate spans. +`trace.StartSpan` creates a new span as a root if the current context +doesn't contain a span. Or, it creates a child of the span that is +already in current context. The returned context can be used to keep +propagating the newly created span in the current context. + +[embedmd]:# (internal/readme/trace.go startend) +```go +ctx, span := trace.StartSpan(ctx, "cache.Get") +defer span.End() + +// Do work to get from cache. +``` + +Across the network, OpenCensus provides different propagation +methods for different protocols. + +* gRPC integrations use the OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation). +* HTTP integrations use Zipkin's [B3](https://github.com/openzipkin/b3-propagation) + by default but can be configured to use a custom propagation method by setting another + [propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat). + +## Execution Tracer + +With Go 1.11, OpenCensus Go will support integration with the Go execution tracer. +See [Debugging Latency in Go](https://medium.com/observability/debugging-latency-in-go-1-11-9f97a7910d68) +for an example of their mutual use. + +## Profiles + +OpenCensus tags can be applied as profiler labels +for users who are on Go 1.9 and above. + +[embedmd]:# (internal/readme/tags.go profiler) +```go +ctx, err = tag.New(ctx, + tag.Insert(osKey, "macOS-10.12.5"), + tag.Insert(userIDKey, "fff0989878"), +) +if err != nil { + log.Fatal(err) +} +tag.Do(ctx, func(ctx context.Context) { + // Do work. + // When profiling is on, samples will be + // recorded with the key/values from the tag map. +}) +``` + +A screenshot of the CPU profile from the program above: + +![CPU profile](https://i.imgur.com/jBKjlkw.png) + +## Deprecation Policy + +Before version 1.0.0, the following deprecation policy will be observed: + +No backwards-incompatible changes will be made except for the removal of symbols that have +been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release +removing the *Deprecated* functionality will be made no sooner than 28 days after the first +release in which the functionality was marked *Deprecated*. 
+ +[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master +[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go +[appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true +[appveyor-url]: https://ci.appveyor.com/project/opencensusgoteam/opencensus-go/branch/master +[godoc-image]: https://godoc.org/go.opencensus.io?status.svg +[godoc-url]: https://godoc.org/go.opencensus.io +[gitter-image]: https://badges.gitter.im/census-instrumentation/lobby.svg +[gitter-url]: https://gitter.im/census-instrumentation/lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge + + +[new-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap +[new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace + +[exporter-prom]: https://godoc.org/contrib.go.opencensus.io/exporter/prometheus +[exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver +[exporter-zipkin]: https://godoc.org/contrib.go.opencensus.io/exporter/zipkin +[exporter-jaeger]: https://godoc.org/contrib.go.opencensus.io/exporter/jaeger +[exporter-xray]: https://github.com/census-ecosystem/opencensus-go-exporter-aws +[exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog +[exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite +[exporter-honeycomb]: https://github.com/honeycombio/opencensus-exporter +[exporter-newrelic]: https://github.com/newrelic/newrelic-opencensus-exporter-go diff --git a/ecs-agent/vendor/go.opencensus.io/appveyor.yml b/ecs-agent/vendor/go.opencensus.io/appveyor.yml new file mode 100644 index 00000000000..d08f0edaff9 --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/appveyor.yml @@ -0,0 +1,24 @@ +version: "{build}" + +platform: x64 + +clone_folder: c:\gopath\src\go.opencensus.io + +environment: + GOPATH: 'c:\gopath' + GO111MODULE: 'on' + CGO_ENABLED: '0' # See: https://github.com/appveyor/ci/issues/2613 + +stack: go 1.11 + +before_test: + - go version + - go env + +build: false +deploy: false + +test_script: + - cd %APPVEYOR_BUILD_FOLDER% + - go build -v .\... + - go test -v .\... # No -race because cgo is disabled diff --git a/ecs-agent/vendor/go.opencensus.io/internal/internal.go b/ecs-agent/vendor/go.opencensus.io/internal/internal.go new file mode 100644 index 00000000000..81dc7183ec3 --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/internal/internal.go @@ -0,0 +1,37 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opencensus.io/internal" + +import ( + "fmt" + "time" + + opencensus "go.opencensus.io" +) + +// UserAgent is the user agent to be added to the outgoing +// requests from the exporters. +var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version()) + +// MonotonicEndTime returns the end time at present +// but offset from start, monotonically. 
+// +// The monotonic clock is used in subtractions hence +// the duration since start added back to start gives +// end as a monotonic time. +// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks +func MonotonicEndTime(start time.Time) time.Time { + return start.Add(time.Since(start)) +} diff --git a/ecs-agent/vendor/go.opencensus.io/internal/sanitize.go b/ecs-agent/vendor/go.opencensus.io/internal/sanitize.go new file mode 100644 index 00000000000..de8ccf236c4 --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/internal/sanitize.go @@ -0,0 +1,50 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "strings" + "unicode" +) + +const labelKeySizeLimit = 100 + +// Sanitize returns a string that is trunacated to 100 characters if it's too +// long, and replaces non-alphanumeric characters to underscores. +func Sanitize(s string) string { + if len(s) == 0 { + return s + } + if len(s) > labelKeySizeLimit { + s = s[:labelKeySizeLimit] + } + s = strings.Map(sanitizeRune, s) + if unicode.IsDigit(rune(s[0])) { + s = "key_" + s + } + if s[0] == '_' { + s = "key" + s + } + return s +} + +// converts anything that is not a letter or digit to an underscore +func sanitizeRune(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) { + return r + } + // Everything else turns into an underscore + return '_' +} diff --git a/ecs-agent/vendor/go.opencensus.io/internal/traceinternals.go b/ecs-agent/vendor/go.opencensus.io/internal/traceinternals.go new file mode 100644 index 00000000000..073af7b473a --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/internal/traceinternals.go @@ -0,0 +1,53 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "time" +) + +// Trace allows internal access to some trace functionality. +// TODO(#412): remove this +var Trace interface{} + +// LocalSpanStoreEnabled true if the local span store is enabled. +var LocalSpanStoreEnabled bool + +// BucketConfiguration stores the number of samples to store for span buckets +// for successful and failed spans for a particular span name. +type BucketConfiguration struct { + Name string + MaxRequestsSucceeded int + MaxRequestsErrors int +} + +// PerMethodSummary is a summary of the spans stored for a single span name. 
+type PerMethodSummary struct { + Active int + LatencyBuckets []LatencyBucketSummary + ErrorBuckets []ErrorBucketSummary +} + +// LatencyBucketSummary is a summary of a latency bucket. +type LatencyBucketSummary struct { + MinLatency, MaxLatency time.Duration + Size int +} + +// ErrorBucketSummary is a summary of an error bucket. +type ErrorBucketSummary struct { + ErrorCode int32 + Size int +} diff --git a/ecs-agent/vendor/go.opencensus.io/opencensus.go b/ecs-agent/vendor/go.opencensus.io/opencensus.go new file mode 100644 index 00000000000..11e31f421c5 --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/opencensus.go @@ -0,0 +1,21 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package opencensus contains Go support for OpenCensus. +package opencensus // import "go.opencensus.io" + +// Version is the current release version of OpenCensus in use. +func Version() string { + return "0.24.0" +} diff --git a/ecs-agent/vendor/go.opencensus.io/trace/basetypes.go b/ecs-agent/vendor/go.opencensus.io/trace/basetypes.go new file mode 100644 index 00000000000..c8e26ed6355 --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/trace/basetypes.go @@ -0,0 +1,129 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "fmt" + "time" +) + +type ( + // TraceID is a 16-byte identifier for a set of spans. + TraceID [16]byte + + // SpanID is an 8-byte identifier for a single span. + SpanID [8]byte +) + +func (t TraceID) String() string { + return fmt.Sprintf("%02x", t[:]) +} + +func (s SpanID) String() string { + return fmt.Sprintf("%02x", s[:]) +} + +// Annotation represents a text annotation with a set of attributes and a timestamp. +type Annotation struct { + Time time.Time + Message string + Attributes map[string]interface{} +} + +// Attribute represents a key-value pair on a span, link or annotation. +// Construct with one of: BoolAttribute, Int64Attribute, or StringAttribute. +type Attribute struct { + key string + value interface{} +} + +// Key returns the attribute's key +func (a *Attribute) Key() string { + return a.key +} + +// Value returns the attribute's value +func (a *Attribute) Value() interface{} { + return a.value +} + +// BoolAttribute returns a bool-valued attribute. +func BoolAttribute(key string, value bool) Attribute { + return Attribute{key: key, value: value} +} + +// Int64Attribute returns an int64-valued attribute. 
+func Int64Attribute(key string, value int64) Attribute { + return Attribute{key: key, value: value} +} + +// Float64Attribute returns a float64-valued attribute. +func Float64Attribute(key string, value float64) Attribute { + return Attribute{key: key, value: value} +} + +// StringAttribute returns a string-valued attribute. +func StringAttribute(key string, value string) Attribute { + return Attribute{key: key, value: value} +} + +// LinkType specifies the relationship between the span that had the link +// added, and the linked span. +type LinkType int32 + +// LinkType values. +const ( + LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown. + LinkTypeChild // The linked span is a child of the current span. + LinkTypeParent // The linked span is the parent of the current span. +) + +// Link represents a reference from one span to another span. +type Link struct { + TraceID TraceID + SpanID SpanID + Type LinkType + // Attributes is a set of attributes on the link. + Attributes map[string]interface{} +} + +// MessageEventType specifies the type of message event. +type MessageEventType int32 + +// MessageEventType values. +const ( + MessageEventTypeUnspecified MessageEventType = iota // Unknown event type. + MessageEventTypeSent // Indicates a sent RPC message. + MessageEventTypeRecv // Indicates a received RPC message. +) + +// MessageEvent represents an event describing a message sent or received on the network. +type MessageEvent struct { + Time time.Time + EventType MessageEventType + MessageID int64 + UncompressedByteSize int64 + CompressedByteSize int64 +} + +// Status is the status of a Span. +type Status struct { + // Code is a status code. Zero indicates success. + // + // If Code will be propagated to Google APIs, it ideally should be a value from + // https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto . + Code int32 + Message string +} diff --git a/ecs-agent/vendor/go.opencensus.io/trace/config.go b/ecs-agent/vendor/go.opencensus.io/trace/config.go new file mode 100644 index 00000000000..775f8274faa --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/trace/config.go @@ -0,0 +1,86 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "sync" + + "go.opencensus.io/trace/internal" +) + +// Config represents the global tracing configuration. +type Config struct { + // DefaultSampler is the default sampler used when creating new spans. + DefaultSampler Sampler + + // IDGenerator is for internal use only. 
+ IDGenerator internal.IDGenerator + + // MaxAnnotationEventsPerSpan is max number of annotation events per span + MaxAnnotationEventsPerSpan int + + // MaxMessageEventsPerSpan is max number of message events per span + MaxMessageEventsPerSpan int + + // MaxAnnotationEventsPerSpan is max number of attributes per span + MaxAttributesPerSpan int + + // MaxLinksPerSpan is max number of links per span + MaxLinksPerSpan int +} + +var configWriteMu sync.Mutex + +const ( + // DefaultMaxAnnotationEventsPerSpan is default max number of annotation events per span + DefaultMaxAnnotationEventsPerSpan = 32 + + // DefaultMaxMessageEventsPerSpan is default max number of message events per span + DefaultMaxMessageEventsPerSpan = 128 + + // DefaultMaxAttributesPerSpan is default max number of attributes per span + DefaultMaxAttributesPerSpan = 32 + + // DefaultMaxLinksPerSpan is default max number of links per span + DefaultMaxLinksPerSpan = 32 +) + +// ApplyConfig applies changes to the global tracing configuration. +// +// Fields not provided in the given config are going to be preserved. +func ApplyConfig(cfg Config) { + configWriteMu.Lock() + defer configWriteMu.Unlock() + c := *config.Load().(*Config) + if cfg.DefaultSampler != nil { + c.DefaultSampler = cfg.DefaultSampler + } + if cfg.IDGenerator != nil { + c.IDGenerator = cfg.IDGenerator + } + if cfg.MaxAnnotationEventsPerSpan > 0 { + c.MaxAnnotationEventsPerSpan = cfg.MaxAnnotationEventsPerSpan + } + if cfg.MaxMessageEventsPerSpan > 0 { + c.MaxMessageEventsPerSpan = cfg.MaxMessageEventsPerSpan + } + if cfg.MaxAttributesPerSpan > 0 { + c.MaxAttributesPerSpan = cfg.MaxAttributesPerSpan + } + if cfg.MaxLinksPerSpan > 0 { + c.MaxLinksPerSpan = cfg.MaxLinksPerSpan + } + config.Store(&c) +} diff --git a/ecs-agent/vendor/go.opencensus.io/trace/doc.go b/ecs-agent/vendor/go.opencensus.io/trace/doc.go new file mode 100644 index 00000000000..7a1616a55c5 --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/trace/doc.go @@ -0,0 +1,52 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package trace contains support for OpenCensus distributed tracing. + +The following assumes a basic familiarity with OpenCensus concepts. +See http://opencensus.io + +# Exporting Traces + +To export collected tracing data, register at least one exporter. You can use +one of the provided exporters or write your own. + + trace.RegisterExporter(exporter) + +By default, traces will be sampled relatively rarely. To change the sampling +frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler +to sample a subset of traces, or use AlwaysSample to collect a trace on every run: + + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + +Be careful about using trace.AlwaysSample in a production application with +significant traffic: a new trace will be started and exported for every request. + +# Adding Spans to a Trace + +A trace consists of a tree of spans. 
In Go, the current span is carried in a +context.Context. + +It is common to want to capture all the activity of a function call in a span. For +this to work, the function must take a context.Context as a parameter. Add these two +lines to the top of the function: + + ctx, span := trace.StartSpan(ctx, "example.com/Run") + defer span.End() + +StartSpan will create a new top-level span if the context +doesn't contain another span, otherwise it will create a child span. +*/ +package trace // import "go.opencensus.io/trace" diff --git a/ecs-agent/vendor/go.opencensus.io/trace/evictedqueue.go b/ecs-agent/vendor/go.opencensus.io/trace/evictedqueue.go new file mode 100644 index 00000000000..ffc264f23d2 --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/trace/evictedqueue.go @@ -0,0 +1,38 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +type evictedQueue struct { + queue []interface{} + capacity int + droppedCount int +} + +func newEvictedQueue(capacity int) *evictedQueue { + eq := &evictedQueue{ + capacity: capacity, + queue: make([]interface{}, 0), + } + + return eq +} + +func (eq *evictedQueue) add(value interface{}) { + if len(eq.queue) == eq.capacity { + eq.queue = eq.queue[1:] + eq.droppedCount++ + } + eq.queue = append(eq.queue, value) +} diff --git a/ecs-agent/vendor/go.opencensus.io/trace/export.go b/ecs-agent/vendor/go.opencensus.io/trace/export.go new file mode 100644 index 00000000000..e0d9a4b99e9 --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/trace/export.go @@ -0,0 +1,97 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "sync" + "sync/atomic" + "time" +) + +// Exporter is a type for functions that receive sampled trace spans. +// +// The ExportSpan method should be safe for concurrent use and should return +// quickly; if an Exporter takes a significant amount of time to process a +// SpanData, that work should be done on another goroutine. +// +// The SpanData should not be modified, but a pointer to it can be kept. +type Exporter interface { + ExportSpan(s *SpanData) +} + +type exportersMap map[Exporter]struct{} + +var ( + exporterMu sync.Mutex + exporters atomic.Value +) + +// RegisterExporter adds to the list of Exporters that will receive sampled +// trace spans. +// +// Binaries can register exporters, libraries shouldn't register exporters. 
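Illustrative aside (not part of the vendored file): RegisterExporter, defined next, together with the Exporter interface above is the integration point for span backends. A minimal sketch with an invented logExporter that only logs finished, sampled spans:

    package main

    import (
        "context"
        "log"

        "go.opencensus.io/trace"
    )

    // logExporter is a toy Exporter that logs finished, sampled spans.
    type logExporter struct{}

    func (logExporter) ExportSpan(sd *trace.SpanData) {
        log.Printf("span %q: %v, status=%d", sd.Name, sd.EndTime.Sub(sd.StartTime), sd.Status.Code)
    }

    func main() {
        trace.RegisterExporter(logExporter{})
        trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})

        _, span := trace.StartSpan(context.Background(), "example.com/Ping")
        span.End() // the finished span is delivered to logExporter.ExportSpan here
    }

Because ExportSpan is called when the span ends, a real exporter would hand the SpanData off to another goroutine, as the interface comment above advises.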
+func RegisterExporter(e Exporter) { + exporterMu.Lock() + new := make(exportersMap) + if old, ok := exporters.Load().(exportersMap); ok { + for k, v := range old { + new[k] = v + } + } + new[e] = struct{}{} + exporters.Store(new) + exporterMu.Unlock() +} + +// UnregisterExporter removes from the list of Exporters the Exporter that was +// registered with the given name. +func UnregisterExporter(e Exporter) { + exporterMu.Lock() + new := make(exportersMap) + if old, ok := exporters.Load().(exportersMap); ok { + for k, v := range old { + new[k] = v + } + } + delete(new, e) + exporters.Store(new) + exporterMu.Unlock() +} + +// SpanData contains all the information collected by a Span. +type SpanData struct { + SpanContext + ParentSpanID SpanID + SpanKind int + Name string + StartTime time.Time + // The wall clock time of EndTime will be adjusted to always be offset + // from StartTime by the duration of the span. + EndTime time.Time + // The values of Attributes each have type string, bool, or int64. + Attributes map[string]interface{} + Annotations []Annotation + MessageEvents []MessageEvent + Status + Links []Link + HasRemoteParent bool + DroppedAttributeCount int + DroppedAnnotationCount int + DroppedMessageEventCount int + DroppedLinkCount int + + // ChildSpanCount holds the number of child span created for this span. + ChildSpanCount int +} diff --git a/ecs-agent/vendor/go.opencensus.io/trace/internal/internal.go b/ecs-agent/vendor/go.opencensus.io/trace/internal/internal.go new file mode 100644 index 00000000000..7e808d8f30e --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/trace/internal/internal.go @@ -0,0 +1,22 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal provides trace internals. +package internal + +// IDGenerator allows custom generators for TraceId and SpanId. +type IDGenerator interface { + NewTraceID() [16]byte + NewSpanID() [8]byte +} diff --git a/ecs-agent/vendor/go.opencensus.io/trace/lrumap.go b/ecs-agent/vendor/go.opencensus.io/trace/lrumap.go new file mode 100644 index 00000000000..80095a5f6c0 --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/trace/lrumap.go @@ -0,0 +1,61 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "github.com/golang/groupcache/lru" +) + +// A simple lru.Cache wrapper that tracks the keys of the current contents and +// the cumulative number of evicted items. 
+type lruMap struct { + cacheKeys map[lru.Key]bool + cache *lru.Cache + droppedCount int +} + +func newLruMap(size int) *lruMap { + lm := &lruMap{ + cacheKeys: make(map[lru.Key]bool), + cache: lru.New(size), + droppedCount: 0, + } + lm.cache.OnEvicted = func(key lru.Key, value interface{}) { + delete(lm.cacheKeys, key) + lm.droppedCount++ + } + return lm +} + +func (lm lruMap) len() int { + return lm.cache.Len() +} + +func (lm lruMap) keys() []interface{} { + keys := make([]interface{}, 0, len(lm.cacheKeys)) + for k := range lm.cacheKeys { + keys = append(keys, k) + } + return keys +} + +func (lm *lruMap) add(key, value interface{}) { + lm.cacheKeys[lru.Key(key)] = true + lm.cache.Add(lru.Key(key), value) +} + +func (lm *lruMap) get(key interface{}) (interface{}, bool) { + return lm.cache.Get(key) +} diff --git a/ecs-agent/vendor/go.opencensus.io/trace/sampling.go b/ecs-agent/vendor/go.opencensus.io/trace/sampling.go new file mode 100644 index 00000000000..71c10f9e3b4 --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/trace/sampling.go @@ -0,0 +1,75 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "encoding/binary" +) + +const defaultSamplingProbability = 1e-4 + +// Sampler decides whether a trace should be sampled and exported. +type Sampler func(SamplingParameters) SamplingDecision + +// SamplingParameters contains the values passed to a Sampler. +type SamplingParameters struct { + ParentContext SpanContext + TraceID TraceID + SpanID SpanID + Name string + HasRemoteParent bool +} + +// SamplingDecision is the value returned by a Sampler. +type SamplingDecision struct { + Sample bool +} + +// ProbabilitySampler returns a Sampler that samples a given fraction of traces. +// +// It also samples spans whose parents are sampled. +func ProbabilitySampler(fraction float64) Sampler { + if !(fraction >= 0) { + fraction = 0 + } else if fraction >= 1 { + return AlwaysSample() + } + + traceIDUpperBound := uint64(fraction * (1 << 63)) + return Sampler(func(p SamplingParameters) SamplingDecision { + if p.ParentContext.IsSampled() { + return SamplingDecision{Sample: true} + } + x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1 + return SamplingDecision{Sample: x < traceIDUpperBound} + }) +} + +// AlwaysSample returns a Sampler that samples every trace. +// Be careful about using this sampler in a production application with +// significant traffic: a new trace will be started and exported for every +// request. +func AlwaysSample() Sampler { + return func(p SamplingParameters) SamplingDecision { + return SamplingDecision{Sample: true} + } +} + +// NeverSample returns a Sampler that samples no traces. 
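Illustrative aside (not part of the vendored file): Sampler is a plain function type, so callers are not limited to ProbabilitySampler, AlwaysSample, or NeverSample (defined next); a custom sampler can be installed globally through ApplyConfig. A sketch with an invented prefixSampler:

    package main

    import (
        "context"
        "strings"

        "go.opencensus.io/trace"
    )

    // prefixSampler keeps spans whose name carries the given prefix and, like
    // the built-in samplers, always follows an already-sampled parent.
    func prefixSampler(prefix string) trace.Sampler {
        return func(p trace.SamplingParameters) trace.SamplingDecision {
            if p.ParentContext.IsSampled() {
                return trace.SamplingDecision{Sample: true}
            }
            return trace.SamplingDecision{Sample: strings.HasPrefix(p.Name, prefix)}
        }
    }

    func main() {
        trace.ApplyConfig(trace.Config{DefaultSampler: prefixSampler("example.com/api/")})

        _, span := trace.StartSpan(context.Background(), "example.com/api/List")
        defer span.End()
    }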
+func NeverSample() Sampler { + return func(p SamplingParameters) SamplingDecision { + return SamplingDecision{Sample: false} + } +} diff --git a/ecs-agent/vendor/go.opencensus.io/trace/spanbucket.go b/ecs-agent/vendor/go.opencensus.io/trace/spanbucket.go new file mode 100644 index 00000000000..fbabad34c00 --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/trace/spanbucket.go @@ -0,0 +1,130 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "time" +) + +// samplePeriod is the minimum time between accepting spans in a single bucket. +const samplePeriod = time.Second + +// defaultLatencies contains the default latency bucket bounds. +// TODO: consider defaults, make configurable +var defaultLatencies = [...]time.Duration{ + 10 * time.Microsecond, + 100 * time.Microsecond, + time.Millisecond, + 10 * time.Millisecond, + 100 * time.Millisecond, + time.Second, + 10 * time.Second, + time.Minute, +} + +// bucket is a container for a set of spans for a particular error code or latency range. +type bucket struct { + nextTime time.Time // next time we can accept a span + buffer []*SpanData // circular buffer of spans + nextIndex int // location next SpanData should be placed in buffer + overflow bool // whether the circular buffer has wrapped around +} + +func makeBucket(bufferSize int) bucket { + return bucket{ + buffer: make([]*SpanData, bufferSize), + } +} + +// add adds a span to the bucket, if nextTime has been reached. +func (b *bucket) add(s *SpanData) { + if s.EndTime.Before(b.nextTime) { + return + } + if len(b.buffer) == 0 { + return + } + b.nextTime = s.EndTime.Add(samplePeriod) + b.buffer[b.nextIndex] = s + b.nextIndex++ + if b.nextIndex == len(b.buffer) { + b.nextIndex = 0 + b.overflow = true + } +} + +// size returns the number of spans in the bucket. +func (b *bucket) size() int { + if b.overflow { + return len(b.buffer) + } + return b.nextIndex +} + +// span returns the ith span in the bucket. +func (b *bucket) span(i int) *SpanData { + if !b.overflow { + return b.buffer[i] + } + if i < len(b.buffer)-b.nextIndex { + return b.buffer[b.nextIndex+i] + } + return b.buffer[b.nextIndex+i-len(b.buffer)] +} + +// resize changes the size of the bucket to n, keeping up to n existing spans. +func (b *bucket) resize(n int) { + cur := b.size() + newBuffer := make([]*SpanData, n) + if cur < n { + for i := 0; i < cur; i++ { + newBuffer[i] = b.span(i) + } + b.buffer = newBuffer + b.nextIndex = cur + b.overflow = false + return + } + for i := 0; i < n; i++ { + newBuffer[i] = b.span(i + cur - n) + } + b.buffer = newBuffer + b.nextIndex = 0 + b.overflow = true +} + +// latencyBucket returns the appropriate bucket number for a given latency. +func latencyBucket(latency time.Duration) int { + i := 0 + for i < len(defaultLatencies) && latency >= defaultLatencies[i] { + i++ + } + return i +} + +// latencyBucketBounds returns the lower and upper bounds for a latency bucket +// number. 
+// +// The lower bound is inclusive, the upper bound is exclusive (except for the +// last bucket.) +func latencyBucketBounds(index int) (lower time.Duration, upper time.Duration) { + if index == 0 { + return 0, defaultLatencies[index] + } + if index == len(defaultLatencies) { + return defaultLatencies[index-1], 1<<63 - 1 + } + return defaultLatencies[index-1], defaultLatencies[index] +} diff --git a/ecs-agent/vendor/go.opencensus.io/trace/spanstore.go b/ecs-agent/vendor/go.opencensus.io/trace/spanstore.go new file mode 100644 index 00000000000..e601f76f2c8 --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/trace/spanstore.go @@ -0,0 +1,308 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "sync" + "time" + + "go.opencensus.io/internal" +) + +const ( + maxBucketSize = 100000 + defaultBucketSize = 10 +) + +var ( + ssmu sync.RWMutex // protects spanStores + spanStores = make(map[string]*spanStore) +) + +// This exists purely to avoid exposing internal methods used by z-Pages externally. +type internalOnly struct{} + +func init() { + //TODO(#412): remove + internal.Trace = &internalOnly{} +} + +// ReportActiveSpans returns the active spans for the given name. +func (i internalOnly) ReportActiveSpans(name string) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + for activeSpan := range s.active { + if s, ok := activeSpan.(*span); ok { + out = append(out, s.makeSpanData()) + } + } + return out +} + +// ReportSpansByError returns a sample of error spans. +// +// If code is nonzero, only spans with that status code are returned. +func (i internalOnly) ReportSpansByError(name string, code int32) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + if code != 0 { + if b, ok := s.errors[code]; ok { + for _, sd := range b.buffer { + if sd == nil { + break + } + out = append(out, sd) + } + } + } else { + for _, b := range s.errors { + for _, sd := range b.buffer { + if sd == nil { + break + } + out = append(out, sd) + } + } + } + return out +} + +// ConfigureBucketSizes sets the number of spans to keep per latency and error +// bucket for different span names. +func (i internalOnly) ConfigureBucketSizes(bcs []internal.BucketConfiguration) { + for _, bc := range bcs { + latencyBucketSize := bc.MaxRequestsSucceeded + if latencyBucketSize < 0 { + latencyBucketSize = 0 + } + if latencyBucketSize > maxBucketSize { + latencyBucketSize = maxBucketSize + } + errorBucketSize := bc.MaxRequestsErrors + if errorBucketSize < 0 { + errorBucketSize = 0 + } + if errorBucketSize > maxBucketSize { + errorBucketSize = maxBucketSize + } + spanStoreSetSize(bc.Name, latencyBucketSize, errorBucketSize) + } +} + +// ReportSpansPerMethod returns a summary of what spans are being stored for each span name. 
+func (i internalOnly) ReportSpansPerMethod() map[string]internal.PerMethodSummary { + out := make(map[string]internal.PerMethodSummary) + ssmu.RLock() + defer ssmu.RUnlock() + for name, s := range spanStores { + s.mu.Lock() + p := internal.PerMethodSummary{ + Active: len(s.active), + } + for code, b := range s.errors { + p.ErrorBuckets = append(p.ErrorBuckets, internal.ErrorBucketSummary{ + ErrorCode: code, + Size: b.size(), + }) + } + for i, b := range s.latency { + min, max := latencyBucketBounds(i) + p.LatencyBuckets = append(p.LatencyBuckets, internal.LatencyBucketSummary{ + MinLatency: min, + MaxLatency: max, + Size: b.size(), + }) + } + s.mu.Unlock() + out[name] = p + } + return out +} + +// ReportSpansByLatency returns a sample of successful spans. +// +// minLatency is the minimum latency of spans to be returned. +// maxLatency, if nonzero, is the maximum latency of spans to be returned. +func (i internalOnly) ReportSpansByLatency(name string, minLatency, maxLatency time.Duration) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + for i, b := range s.latency { + min, max := latencyBucketBounds(i) + if i+1 != len(s.latency) && max <= minLatency { + continue + } + if maxLatency != 0 && maxLatency < min { + continue + } + for _, sd := range b.buffer { + if sd == nil { + break + } + if minLatency != 0 || maxLatency != 0 { + d := sd.EndTime.Sub(sd.StartTime) + if d < minLatency { + continue + } + if maxLatency != 0 && d > maxLatency { + continue + } + } + out = append(out, sd) + } + } + return out +} + +// spanStore keeps track of spans stored for a particular span name. +// +// It contains all active spans; a sample of spans for failed requests, +// categorized by error code; and a sample of spans for successful requests, +// bucketed by latency. +type spanStore struct { + mu sync.Mutex // protects everything below. + active map[SpanInterface]struct{} + errors map[int32]*bucket + latency []bucket + maxSpansPerErrorBucket int +} + +// newSpanStore creates a span store. +func newSpanStore(name string, latencyBucketSize int, errorBucketSize int) *spanStore { + s := &spanStore{ + active: make(map[SpanInterface]struct{}), + latency: make([]bucket, len(defaultLatencies)+1), + maxSpansPerErrorBucket: errorBucketSize, + } + for i := range s.latency { + s.latency[i] = makeBucket(latencyBucketSize) + } + return s +} + +// spanStoreForName returns the spanStore for the given name. +// +// It returns nil if it doesn't exist. +func spanStoreForName(name string) *spanStore { + var s *spanStore + ssmu.RLock() + s, _ = spanStores[name] + ssmu.RUnlock() + return s +} + +// spanStoreForNameCreateIfNew returns the spanStore for the given name. +// +// It creates it if it didn't exist. +func spanStoreForNameCreateIfNew(name string) *spanStore { + ssmu.RLock() + s, ok := spanStores[name] + ssmu.RUnlock() + if ok { + return s + } + ssmu.Lock() + defer ssmu.Unlock() + s, ok = spanStores[name] + if ok { + return s + } + s = newSpanStore(name, defaultBucketSize, defaultBucketSize) + spanStores[name] = s + return s +} + +// spanStoreSetSize resizes the spanStore for the given name. +// +// It creates it if it didn't exist. 
+func spanStoreSetSize(name string, latencyBucketSize int, errorBucketSize int) { + ssmu.RLock() + s, ok := spanStores[name] + ssmu.RUnlock() + if ok { + s.resize(latencyBucketSize, errorBucketSize) + return + } + ssmu.Lock() + defer ssmu.Unlock() + s, ok = spanStores[name] + if ok { + s.resize(latencyBucketSize, errorBucketSize) + return + } + s = newSpanStore(name, latencyBucketSize, errorBucketSize) + spanStores[name] = s +} + +func (s *spanStore) resize(latencyBucketSize int, errorBucketSize int) { + s.mu.Lock() + for i := range s.latency { + s.latency[i].resize(latencyBucketSize) + } + for _, b := range s.errors { + b.resize(errorBucketSize) + } + s.maxSpansPerErrorBucket = errorBucketSize + s.mu.Unlock() +} + +// add adds a span to the active bucket of the spanStore. +func (s *spanStore) add(span SpanInterface) { + s.mu.Lock() + s.active[span] = struct{}{} + s.mu.Unlock() +} + +// finished removes a span from the active set, and adds a corresponding +// SpanData to a latency or error bucket. +func (s *spanStore) finished(span SpanInterface, sd *SpanData) { + latency := sd.EndTime.Sub(sd.StartTime) + if latency < 0 { + latency = 0 + } + code := sd.Status.Code + + s.mu.Lock() + delete(s.active, span) + if code == 0 { + s.latency[latencyBucket(latency)].add(sd) + } else { + if s.errors == nil { + s.errors = make(map[int32]*bucket) + } + if b := s.errors[code]; b != nil { + b.add(sd) + } else { + b := makeBucket(s.maxSpansPerErrorBucket) + s.errors[code] = &b + b.add(sd) + } + } + s.mu.Unlock() +} diff --git a/ecs-agent/vendor/go.opencensus.io/trace/status_codes.go b/ecs-agent/vendor/go.opencensus.io/trace/status_codes.go new file mode 100644 index 00000000000..ec60effd108 --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/trace/status_codes.go @@ -0,0 +1,37 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +// Status codes for use with Span.SetStatus. These correspond to the status +// codes used by gRPC defined here: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto +const ( + StatusCodeOK = 0 + StatusCodeCancelled = 1 + StatusCodeUnknown = 2 + StatusCodeInvalidArgument = 3 + StatusCodeDeadlineExceeded = 4 + StatusCodeNotFound = 5 + StatusCodeAlreadyExists = 6 + StatusCodePermissionDenied = 7 + StatusCodeResourceExhausted = 8 + StatusCodeFailedPrecondition = 9 + StatusCodeAborted = 10 + StatusCodeOutOfRange = 11 + StatusCodeUnimplemented = 12 + StatusCodeInternal = 13 + StatusCodeUnavailable = 14 + StatusCodeDataLoss = 15 + StatusCodeUnauthenticated = 16 +) diff --git a/ecs-agent/vendor/go.opencensus.io/trace/trace.go b/ecs-agent/vendor/go.opencensus.io/trace/trace.go new file mode 100644 index 00000000000..861df9d3913 --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/trace/trace.go @@ -0,0 +1,595 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "context" + crand "crypto/rand" + "encoding/binary" + "fmt" + "math/rand" + "sync" + "sync/atomic" + "time" + + "go.opencensus.io/internal" + "go.opencensus.io/trace/tracestate" +) + +type tracer struct{} + +var _ Tracer = &tracer{} + +// Span represents a span of a trace. It has an associated SpanContext, and +// stores data accumulated while the span is active. +// +// Ideally users should interact with Spans by calling the functions in this +// package that take a Context parameter. +type span struct { + // data contains information recorded about the span. + // + // It will be non-nil if we are exporting the span or recording events for it. + // Otherwise, data is nil, and the Span is simply a carrier for the + // SpanContext, so that the trace ID is propagated. + data *SpanData + mu sync.Mutex // protects the contents of *data (but not the pointer value.) + spanContext SpanContext + + // lruAttributes are capped at configured limit. When the capacity is reached an oldest entry + // is removed to create room for a new entry. + lruAttributes *lruMap + + // annotations are stored in FIFO queue capped by configured limit. + annotations *evictedQueue + + // messageEvents are stored in FIFO queue capped by configured limit. + messageEvents *evictedQueue + + // links are stored in FIFO queue capped by configured limit. + links *evictedQueue + + // spanStore is the spanStore this span belongs to, if any, otherwise it is nil. + *spanStore + endOnce sync.Once + + executionTracerTaskEnd func() // ends the execution tracer span +} + +// IsRecordingEvents returns true if events are being recorded for this span. +// Use this check to avoid computing expensive annotations when they will never +// be used. +func (s *span) IsRecordingEvents() bool { + if s == nil { + return false + } + return s.data != nil +} + +// TraceOptions contains options associated with a trace span. +type TraceOptions uint32 + +// IsSampled returns true if the span will be exported. +func (sc SpanContext) IsSampled() bool { + return sc.TraceOptions.IsSampled() +} + +// setIsSampled sets the TraceOptions bit that determines whether the span will be exported. +func (sc *SpanContext) setIsSampled(sampled bool) { + if sampled { + sc.TraceOptions |= 1 + } else { + sc.TraceOptions &= ^TraceOptions(1) + } +} + +// IsSampled returns true if the span will be exported. +func (t TraceOptions) IsSampled() bool { + return t&1 == 1 +} + +// SpanContext contains the state that must propagate across process boundaries. +// +// SpanContext is not an implementation of context.Context. +// TODO: add reference to external Census docs for SpanContext. +type SpanContext struct { + TraceID TraceID + SpanID SpanID + TraceOptions TraceOptions + Tracestate *tracestate.Tracestate +} + +type contextKey struct{} + +// FromContext returns the Span stored in a context, or nil if there isn't one. +func (t *tracer) FromContext(ctx context.Context) *Span { + s, _ := ctx.Value(contextKey{}).(*Span) + return s +} + +// NewContext returns a new context with the given Span attached. 
+func (t *tracer) NewContext(parent context.Context, s *Span) context.Context { + return context.WithValue(parent, contextKey{}, s) +} + +// All available span kinds. Span kind must be either one of these values. +const ( + SpanKindUnspecified = iota + SpanKindServer + SpanKindClient +) + +// StartOptions contains options concerning how a span is started. +type StartOptions struct { + // Sampler to consult for this Span. If provided, it is always consulted. + // + // If not provided, then the behavior differs based on whether + // the parent of this Span is remote, local, or there is no parent. + // In the case of a remote parent or no parent, the + // default sampler (see Config) will be consulted. Otherwise, + // when there is a non-remote parent, no new sampling decision will be made: + // we will preserve the sampling of the parent. + Sampler Sampler + + // SpanKind represents the kind of a span. If none is set, + // SpanKindUnspecified is used. + SpanKind int +} + +// StartOption apply changes to StartOptions. +type StartOption func(*StartOptions) + +// WithSpanKind makes new spans to be created with the given kind. +func WithSpanKind(spanKind int) StartOption { + return func(o *StartOptions) { + o.SpanKind = spanKind + } +} + +// WithSampler makes new spans to be be created with a custom sampler. +// Otherwise, the global sampler is used. +func WithSampler(sampler Sampler) StartOption { + return func(o *StartOptions) { + o.Sampler = sampler + } +} + +// StartSpan starts a new child span of the current span in the context. If +// there is no span in the context, creates a new trace and span. +// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. +func (t *tracer) StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { + var opts StartOptions + var parent SpanContext + if p := t.FromContext(ctx); p != nil { + if ps, ok := p.internal.(*span); ok { + ps.addChild() + } + parent = p.SpanContext() + } + for _, op := range o { + op(&opts) + } + span := startSpanInternal(name, parent != SpanContext{}, parent, false, opts) + + ctx, end := startExecutionTracerTask(ctx, name) + span.executionTracerTaskEnd = end + extSpan := NewSpan(span) + return t.NewContext(ctx, extSpan), extSpan +} + +// StartSpanWithRemoteParent starts a new child span of the span from the given parent. +// +// If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is +// preferred for cases where the parent is propagated via an incoming request. +// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. 
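Illustrative aside (not part of the vendored file): StartSpanWithRemoteParent, defined next, is the entry point for server-side spans whose parent arrived over the wire. The sketch below assumes the SpanContext was already extracted by some propagation layer; handleRequest and the trace and span IDs are invented.

    package main

    import (
        "context"

        "go.opencensus.io/trace"
    )

    // handleRequest starts a server-kind span under a remote parent that the
    // caller extracted from an incoming request.
    func handleRequest(ctx context.Context, remote trace.SpanContext) {
        ctx, span := trace.StartSpanWithRemoteParent(ctx, "example.com/Handle", remote,
            trace.WithSpanKind(trace.SpanKindServer))
        defer span.End()

        _ = ctx // pass ctx to downstream work so its spans nest under this one
    }

    func main() {
        remote := trace.SpanContext{
            TraceID:      trace.TraceID{0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6, 0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36},
            SpanID:       trace.SpanID{0x00, 0xf0, 0x67, 0xaa, 0x0b, 0xa9, 0x02, 0xb7},
            TraceOptions: trace.TraceOptions(1), // the parent was sampled
        }
        handleRequest(context.Background(), remote)
    }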
+func (t *tracer) StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { + var opts StartOptions + for _, op := range o { + op(&opts) + } + span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts) + ctx, end := startExecutionTracerTask(ctx, name) + span.executionTracerTaskEnd = end + extSpan := NewSpan(span) + return t.NewContext(ctx, extSpan), extSpan +} + +func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *span { + s := &span{} + s.spanContext = parent + + cfg := config.Load().(*Config) + if gen, ok := cfg.IDGenerator.(*defaultIDGenerator); ok { + // lazy initialization + gen.init() + } + + if !hasParent { + s.spanContext.TraceID = cfg.IDGenerator.NewTraceID() + } + s.spanContext.SpanID = cfg.IDGenerator.NewSpanID() + sampler := cfg.DefaultSampler + + if !hasParent || remoteParent || o.Sampler != nil { + // If this span is the child of a local span and no Sampler is set in the + // options, keep the parent's TraceOptions. + // + // Otherwise, consult the Sampler in the options if it is non-nil, otherwise + // the default sampler. + if o.Sampler != nil { + sampler = o.Sampler + } + s.spanContext.setIsSampled(sampler(SamplingParameters{ + ParentContext: parent, + TraceID: s.spanContext.TraceID, + SpanID: s.spanContext.SpanID, + Name: name, + HasRemoteParent: remoteParent}).Sample) + } + + if !internal.LocalSpanStoreEnabled && !s.spanContext.IsSampled() { + return s + } + + s.data = &SpanData{ + SpanContext: s.spanContext, + StartTime: time.Now(), + SpanKind: o.SpanKind, + Name: name, + HasRemoteParent: remoteParent, + } + s.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) + s.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan) + s.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan) + s.links = newEvictedQueue(cfg.MaxLinksPerSpan) + + if hasParent { + s.data.ParentSpanID = parent.SpanID + } + if internal.LocalSpanStoreEnabled { + var ss *spanStore + ss = spanStoreForNameCreateIfNew(name) + if ss != nil { + s.spanStore = ss + ss.add(s) + } + } + + return s +} + +// End ends the span. +func (s *span) End() { + if s == nil { + return + } + if s.executionTracerTaskEnd != nil { + s.executionTracerTaskEnd() + } + if !s.IsRecordingEvents() { + return + } + s.endOnce.Do(func() { + exp, _ := exporters.Load().(exportersMap) + mustExport := s.spanContext.IsSampled() && len(exp) > 0 + if s.spanStore != nil || mustExport { + sd := s.makeSpanData() + sd.EndTime = internal.MonotonicEndTime(sd.StartTime) + if s.spanStore != nil { + s.spanStore.finished(s, sd) + } + if mustExport { + for e := range exp { + e.ExportSpan(sd) + } + } + } + }) +} + +// makeSpanData produces a SpanData representing the current state of the Span. +// It requires that s.data is non-nil. 
+func (s *span) makeSpanData() *SpanData { + var sd SpanData + s.mu.Lock() + sd = *s.data + if s.lruAttributes.len() > 0 { + sd.Attributes = s.lruAttributesToAttributeMap() + sd.DroppedAttributeCount = s.lruAttributes.droppedCount + } + if len(s.annotations.queue) > 0 { + sd.Annotations = s.interfaceArrayToAnnotationArray() + sd.DroppedAnnotationCount = s.annotations.droppedCount + } + if len(s.messageEvents.queue) > 0 { + sd.MessageEvents = s.interfaceArrayToMessageEventArray() + sd.DroppedMessageEventCount = s.messageEvents.droppedCount + } + if len(s.links.queue) > 0 { + sd.Links = s.interfaceArrayToLinksArray() + sd.DroppedLinkCount = s.links.droppedCount + } + s.mu.Unlock() + return &sd +} + +// SpanContext returns the SpanContext of the span. +func (s *span) SpanContext() SpanContext { + if s == nil { + return SpanContext{} + } + return s.spanContext +} + +// SetName sets the name of the span, if it is recording events. +func (s *span) SetName(name string) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.Name = name + s.mu.Unlock() +} + +// SetStatus sets the status of the span, if it is recording events. +func (s *span) SetStatus(status Status) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.Status = status + s.mu.Unlock() +} + +func (s *span) interfaceArrayToLinksArray() []Link { + linksArr := make([]Link, 0, len(s.links.queue)) + for _, value := range s.links.queue { + linksArr = append(linksArr, value.(Link)) + } + return linksArr +} + +func (s *span) interfaceArrayToMessageEventArray() []MessageEvent { + messageEventArr := make([]MessageEvent, 0, len(s.messageEvents.queue)) + for _, value := range s.messageEvents.queue { + messageEventArr = append(messageEventArr, value.(MessageEvent)) + } + return messageEventArr +} + +func (s *span) interfaceArrayToAnnotationArray() []Annotation { + annotationArr := make([]Annotation, 0, len(s.annotations.queue)) + for _, value := range s.annotations.queue { + annotationArr = append(annotationArr, value.(Annotation)) + } + return annotationArr +} + +func (s *span) lruAttributesToAttributeMap() map[string]interface{} { + attributes := make(map[string]interface{}, s.lruAttributes.len()) + for _, key := range s.lruAttributes.keys() { + value, ok := s.lruAttributes.get(key) + if ok { + keyStr := key.(string) + attributes[keyStr] = value + } + } + return attributes +} + +func (s *span) copyToCappedAttributes(attributes []Attribute) { + for _, a := range attributes { + s.lruAttributes.add(a.key, a.value) + } +} + +func (s *span) addChild() { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.ChildSpanCount++ + s.mu.Unlock() +} + +// AddAttributes sets attributes in the span. +// +// Existing attributes whose keys appear in the attributes parameter are overwritten. +func (s *span) AddAttributes(attributes ...Attribute) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.copyToCappedAttributes(attributes) + s.mu.Unlock() +} + +func (s *span) printStringInternal(attributes []Attribute, str string) { + now := time.Now() + var am map[string]interface{} + if len(attributes) != 0 { + am = make(map[string]interface{}, len(attributes)) + for _, attr := range attributes { + am[attr.key] = attr.value + } + } + s.mu.Lock() + s.annotations.add(Annotation{ + Time: now, + Message: str, + Attributes: am, + }) + s.mu.Unlock() +} + +// Annotate adds an annotation with attributes. +// Attributes can be nil. 
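Illustrative aside (not part of the vendored file): Annotate (defined next) and the message-event methods that follow it record point-in-time events on a span; send and receive events are paired by sharing a message ID. A minimal sketch with invented byte sizes:

    package main

    import (
        "context"

        "go.opencensus.io/trace"
    )

    func main() {
        _, span := trace.StartSpan(context.Background(), "example.com/SendRequest",
            trace.WithSampler(trace.AlwaysSample()))
        defer span.End()

        span.Annotate(nil, "request serialized")

        // Use the same message ID for the send and the matching receive so
        // backends can pair the two events.
        const msgID = 1
        span.AddMessageSendEvent(msgID, 512, 380)
        span.AddMessageReceiveEvent(msgID, 2048, 1500)
    }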
+func (s *span) Annotate(attributes []Attribute, str string) { + if !s.IsRecordingEvents() { + return + } + s.printStringInternal(attributes, str) +} + +// Annotatef adds an annotation with attributes. +func (s *span) Annotatef(attributes []Attribute, format string, a ...interface{}) { + if !s.IsRecordingEvents() { + return + } + s.printStringInternal(attributes, fmt.Sprintf(format, a...)) +} + +// AddMessageSendEvent adds a message send event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows to identify a message between the sender and receiver). +// For example, this could be a sequence id. +func (s *span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + now := time.Now() + s.mu.Lock() + s.messageEvents.add(MessageEvent{ + Time: now, + EventType: MessageEventTypeSent, + MessageID: messageID, + UncompressedByteSize: uncompressedByteSize, + CompressedByteSize: compressedByteSize, + }) + s.mu.Unlock() +} + +// AddMessageReceiveEvent adds a message receive event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows to identify a message between the sender and receiver). +// For example, this could be a sequence id. +func (s *span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + now := time.Now() + s.mu.Lock() + s.messageEvents.add(MessageEvent{ + Time: now, + EventType: MessageEventTypeRecv, + MessageID: messageID, + UncompressedByteSize: uncompressedByteSize, + CompressedByteSize: compressedByteSize, + }) + s.mu.Unlock() +} + +// AddLink adds a link to the span. +func (s *span) AddLink(l Link) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.links.add(l) + s.mu.Unlock() +} + +func (s *span) String() string { + if s == nil { + return "" + } + if s.data == nil { + return fmt.Sprintf("span %s", s.spanContext.SpanID) + } + s.mu.Lock() + str := fmt.Sprintf("span %s %q", s.spanContext.SpanID, s.data.Name) + s.mu.Unlock() + return str +} + +var config atomic.Value // access atomically + +func init() { + config.Store(&Config{ + DefaultSampler: ProbabilitySampler(defaultSamplingProbability), + IDGenerator: &defaultIDGenerator{}, + MaxAttributesPerSpan: DefaultMaxAttributesPerSpan, + MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan, + MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan, + MaxLinksPerSpan: DefaultMaxLinksPerSpan, + }) +} + +type defaultIDGenerator struct { + sync.Mutex + + // Please keep these as the first fields + // so that these 8 byte fields will be aligned on addresses + // divisible by 8, on both 32-bit and 64-bit machines when + // performing atomic increments and accesses. + // See: + // * https://github.com/census-instrumentation/opencensus-go/issues/587 + // * https://github.com/census-instrumentation/opencensus-go/issues/865 + // * https://golang.org/pkg/sync/atomic/#pkg-note-BUG + nextSpanID uint64 + spanIDInc uint64 + + traceIDAdd [2]uint64 + traceIDRand *rand.Rand + + initOnce sync.Once +} + +// init initializes the generator on the first call to avoid consuming entropy +// unnecessarily. +func (gen *defaultIDGenerator) init() { + gen.initOnce.Do(func() { + // initialize traceID and spanID generators. 
+ var rngSeed int64 + for _, p := range []interface{}{ + &rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc, + } { + binary.Read(crand.Reader, binary.LittleEndian, p) + } + gen.traceIDRand = rand.New(rand.NewSource(rngSeed)) + gen.spanIDInc |= 1 + }) +} + +// NewSpanID returns a non-zero span ID from a randomly-chosen sequence. +func (gen *defaultIDGenerator) NewSpanID() [8]byte { + var id uint64 + for id == 0 { + id = atomic.AddUint64(&gen.nextSpanID, gen.spanIDInc) + } + var sid [8]byte + binary.LittleEndian.PutUint64(sid[:], id) + return sid +} + +// NewTraceID returns a non-zero trace ID from a randomly-chosen sequence. +// mu should be held while this function is called. +func (gen *defaultIDGenerator) NewTraceID() [16]byte { + var tid [16]byte + // Construct the trace ID from two outputs of traceIDRand, with a constant + // added to each half for additional entropy. + gen.Lock() + binary.LittleEndian.PutUint64(tid[0:8], gen.traceIDRand.Uint64()+gen.traceIDAdd[0]) + binary.LittleEndian.PutUint64(tid[8:16], gen.traceIDRand.Uint64()+gen.traceIDAdd[1]) + gen.Unlock() + return tid +} diff --git a/ecs-agent/vendor/go.opencensus.io/trace/trace_api.go b/ecs-agent/vendor/go.opencensus.io/trace/trace_api.go new file mode 100644 index 00000000000..9e2c3a99926 --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/trace/trace_api.go @@ -0,0 +1,265 @@ +// Copyright 2020, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "context" +) + +// DefaultTracer is the tracer used when package-level exported functions are invoked. +var DefaultTracer Tracer = &tracer{} + +// Tracer can start spans and access context functions. +type Tracer interface { + + // StartSpan starts a new child span of the current span in the context. If + // there is no span in the context, creates a new trace and span. + // + // Returned context contains the newly created span. You can use it to + // propagate the returned span in process. + StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) + + // StartSpanWithRemoteParent starts a new child span of the span from the given parent. + // + // If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is + // preferred for cases where the parent is propagated via an incoming request. + // + // Returned context contains the newly created span. You can use it to + // propagate the returned span in process. + StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) + + // FromContext returns the Span stored in a context, or nil if there isn't one. + FromContext(ctx context.Context) *Span + + // NewContext returns a new context with the given Span attached. + NewContext(parent context.Context, s *Span) context.Context +} + +// StartSpan starts a new child span of the current span in the context. If +// there is no span in the context, creates a new trace and span. 
+// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. +func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { + return DefaultTracer.StartSpan(ctx, name, o...) +} + +// StartSpanWithRemoteParent starts a new child span of the span from the given parent. +// +// If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is +// preferred for cases where the parent is propagated via an incoming request. +// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. +func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { + return DefaultTracer.StartSpanWithRemoteParent(ctx, name, parent, o...) +} + +// FromContext returns the Span stored in a context, or a Span that is not +// recording events if there isn't one. +func FromContext(ctx context.Context) *Span { + return DefaultTracer.FromContext(ctx) +} + +// NewContext returns a new context with the given Span attached. +func NewContext(parent context.Context, s *Span) context.Context { + return DefaultTracer.NewContext(parent, s) +} + +// SpanInterface represents a span of a trace. It has an associated SpanContext, and +// stores data accumulated while the span is active. +// +// Ideally users should interact with Spans by calling the functions in this +// package that take a Context parameter. +type SpanInterface interface { + + // IsRecordingEvents returns true if events are being recorded for this span. + // Use this check to avoid computing expensive annotations when they will never + // be used. + IsRecordingEvents() bool + + // End ends the span. + End() + + // SpanContext returns the SpanContext of the span. + SpanContext() SpanContext + + // SetName sets the name of the span, if it is recording events. + SetName(name string) + + // SetStatus sets the status of the span, if it is recording events. + SetStatus(status Status) + + // AddAttributes sets attributes in the span. + // + // Existing attributes whose keys appear in the attributes parameter are overwritten. + AddAttributes(attributes ...Attribute) + + // Annotate adds an annotation with attributes. + // Attributes can be nil. + Annotate(attributes []Attribute, str string) + + // Annotatef adds an annotation with attributes. + Annotatef(attributes []Attribute, format string, a ...interface{}) + + // AddMessageSendEvent adds a message send event to the span. + // + // messageID is an identifier for the message, which is recommended to be + // unique in this span and the same between the send event and the receive + // event (this allows to identify a message between the sender and receiver). + // For example, this could be a sequence id. + AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) + + // AddMessageReceiveEvent adds a message receive event to the span. + // + // messageID is an identifier for the message, which is recommended to be + // unique in this span and the same between the send event and the receive + // event (this allows to identify a message between the sender and receiver). + // For example, this could be a sequence id. + AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) + + // AddLink adds a link to the span. + AddLink(l Link) + + // String prints a string representation of a span. 
+ String() string +} + +// NewSpan is a convenience function for creating a *Span out of a *span +func NewSpan(s SpanInterface) *Span { + return &Span{internal: s} +} + +// Span is a struct wrapper around the SpanInt interface, which allows correctly handling +// nil spans, while also allowing the SpanInterface implementation to be swapped out. +type Span struct { + internal SpanInterface +} + +// Internal returns the underlying implementation of the Span +func (s *Span) Internal() SpanInterface { + return s.internal +} + +// IsRecordingEvents returns true if events are being recorded for this span. +// Use this check to avoid computing expensive annotations when they will never +// be used. +func (s *Span) IsRecordingEvents() bool { + if s == nil { + return false + } + return s.internal.IsRecordingEvents() +} + +// End ends the span. +func (s *Span) End() { + if s == nil { + return + } + s.internal.End() +} + +// SpanContext returns the SpanContext of the span. +func (s *Span) SpanContext() SpanContext { + if s == nil { + return SpanContext{} + } + return s.internal.SpanContext() +} + +// SetName sets the name of the span, if it is recording events. +func (s *Span) SetName(name string) { + if !s.IsRecordingEvents() { + return + } + s.internal.SetName(name) +} + +// SetStatus sets the status of the span, if it is recording events. +func (s *Span) SetStatus(status Status) { + if !s.IsRecordingEvents() { + return + } + s.internal.SetStatus(status) +} + +// AddAttributes sets attributes in the span. +// +// Existing attributes whose keys appear in the attributes parameter are overwritten. +func (s *Span) AddAttributes(attributes ...Attribute) { + if !s.IsRecordingEvents() { + return + } + s.internal.AddAttributes(attributes...) +} + +// Annotate adds an annotation with attributes. +// Attributes can be nil. +func (s *Span) Annotate(attributes []Attribute, str string) { + if !s.IsRecordingEvents() { + return + } + s.internal.Annotate(attributes, str) +} + +// Annotatef adds an annotation with attributes. +func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) { + if !s.IsRecordingEvents() { + return + } + s.internal.Annotatef(attributes, format, a...) +} + +// AddMessageSendEvent adds a message send event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows to identify a message between the sender and receiver). +// For example, this could be a sequence id. +func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + s.internal.AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize) +} + +// AddMessageReceiveEvent adds a message receive event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows to identify a message between the sender and receiver). +// For example, this could be a sequence id. +func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + s.internal.AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize) +} + +// AddLink adds a link to the span. 
+func (s *Span) AddLink(l Link) { + if !s.IsRecordingEvents() { + return + } + s.internal.AddLink(l) +} + +// String prints a string representation of a span. +func (s *Span) String() string { + if s == nil { + return "" + } + return s.internal.String() +} diff --git a/ecs-agent/vendor/go.opencensus.io/trace/trace_go11.go b/ecs-agent/vendor/go.opencensus.io/trace/trace_go11.go new file mode 100644 index 00000000000..b8fc1e495a9 --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/trace/trace_go11.go @@ -0,0 +1,33 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.11 +// +build go1.11 + +package trace + +import ( + "context" + t "runtime/trace" +) + +func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { + if !t.IsEnabled() { + // Avoid additional overhead if + // runtime/trace is not enabled. + return ctx, func() {} + } + nctx, task := t.NewTask(ctx, name) + return nctx, task.End +} diff --git a/ecs-agent/vendor/go.opencensus.io/trace/trace_nongo11.go b/ecs-agent/vendor/go.opencensus.io/trace/trace_nongo11.go new file mode 100644 index 00000000000..da488fc8740 --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/trace/trace_nongo11.go @@ -0,0 +1,26 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.11 +// +build !go1.11 + +package trace + +import ( + "context" +) + +func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { + return ctx, func() {} +} diff --git a/ecs-agent/vendor/go.opencensus.io/trace/tracestate/tracestate.go b/ecs-agent/vendor/go.opencensus.io/trace/tracestate/tracestate.go new file mode 100644 index 00000000000..2d6c713eb3a --- /dev/null +++ b/ecs-agent/vendor/go.opencensus.io/trace/tracestate/tracestate.go @@ -0,0 +1,147 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
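Illustrative aside (not part of the vendored files): the package-level helpers and the nil-tolerant *Span wrapper in trace_api.go above make it safe to instrument code that may run outside any trace, and IsRecordingEvents lets callers skip expensive attribute computation; annotateCurrent and digest below are invented helpers.

    package main

    import (
        "context"
        "crypto/sha256"
        "encoding/hex"

        "go.opencensus.io/trace"
    )

    // annotateCurrent annotates whatever span is carried in ctx; the nil-tolerant
    // *Span wrapper turns this into a no-op when there is no span at all.
    func annotateCurrent(ctx context.Context, msg string) {
        trace.FromContext(ctx).Annotate(nil, msg)
    }

    // digest stands in for attribute data that is expensive to compute.
    func digest(payload []byte) string {
        sum := sha256.Sum256(payload)
        return hex.EncodeToString(sum[:])
    }

    func main() {
        annotateCurrent(context.Background(), "outside any trace") // safe no-op

        ctx, span := trace.StartSpan(context.Background(), "example.com/Work",
            trace.WithSampler(trace.AlwaysSample()))
        defer span.End()

        annotateCurrent(ctx, "inside the Work span")

        // Only pay for the digest when the span is actually recording events.
        if s := trace.FromContext(ctx); s.IsRecordingEvents() {
            s.AddAttributes(trace.StringAttribute("payload.sha256", digest([]byte("hello"))))
        }
    }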
+ +// Package tracestate implements support for the Tracestate header of the +// W3C TraceContext propagation format. +package tracestate + +import ( + "fmt" + "regexp" +) + +const ( + keyMaxSize = 256 + valueMaxSize = 256 + maxKeyValuePairs = 32 +) + +const ( + keyWithoutVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` + keyWithVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` + keyFormat = `(` + keyWithoutVendorFormat + `)|(` + keyWithVendorFormat + `)` + valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` +) + +var keyValidationRegExp = regexp.MustCompile(`^(` + keyFormat + `)$`) +var valueValidationRegExp = regexp.MustCompile(`^(` + valueFormat + `)$`) + +// Tracestate represents tracing-system specific context in a list of key-value pairs. Tracestate allows different +// vendors propagate additional information and inter-operate with their legacy Id formats. +type Tracestate struct { + entries []Entry +} + +// Entry represents one key-value pair in a list of key-value pair of Tracestate. +type Entry struct { + // Key is an opaque string up to 256 characters printable. It MUST begin with a lowercase letter, + // and can only contain lowercase letters a-z, digits 0-9, underscores _, dashes -, asterisks *, and + // forward slashes /. + Key string + + // Value is an opaque string up to 256 characters printable ASCII RFC0020 characters (i.e., the + // range 0x20 to 0x7E) except comma , and =. + Value string +} + +// Entries returns a slice of Entry. +func (ts *Tracestate) Entries() []Entry { + if ts == nil { + return nil + } + return ts.entries +} + +func (ts *Tracestate) remove(key string) *Entry { + for index, entry := range ts.entries { + if entry.Key == key { + ts.entries = append(ts.entries[:index], ts.entries[index+1:]...) + return &entry + } + } + return nil +} + +func (ts *Tracestate) add(entries []Entry) error { + for _, entry := range entries { + ts.remove(entry.Key) + } + if len(ts.entries)+len(entries) > maxKeyValuePairs { + return fmt.Errorf("adding %d key-value pairs to current %d pairs exceeds the limit of %d", + len(entries), len(ts.entries), maxKeyValuePairs) + } + ts.entries = append(entries, ts.entries...) + return nil +} + +func isValid(entry Entry) bool { + return keyValidationRegExp.MatchString(entry.Key) && + valueValidationRegExp.MatchString(entry.Value) +} + +func containsDuplicateKey(entries ...Entry) (string, bool) { + keyMap := make(map[string]int) + for _, entry := range entries { + if _, ok := keyMap[entry.Key]; ok { + return entry.Key, true + } + keyMap[entry.Key] = 1 + } + return "", false +} + +func areEntriesValid(entries ...Entry) (*Entry, bool) { + for _, entry := range entries { + if !isValid(entry) { + return &entry, false + } + } + return nil, true +} + +// New creates a Tracestate object from a parent and/or entries (key-value pair). +// Entries from the parent are copied if present. The entries passed to this function +// are inserted in front of those copied from the parent. If an entry copied from the +// parent contains the same key as one of the entry in entries then the entry copied +// from the parent is removed. See add func. +// +// An error is returned with nil Tracestate if +// 1. one or more entry in entries is invalid. +// 2. two or more entries in the input entries have the same key. +// 3. the number of entries combined from the parent and the input entries exceeds maxKeyValuePairs. +// (duplicate entry is counted only once). 
+func New(parent *Tracestate, entries ...Entry) (*Tracestate, error) { + if parent == nil && len(entries) == 0 { + return nil, nil + } + if entry, ok := areEntriesValid(entries...); !ok { + return nil, fmt.Errorf("key-value pair {%s, %s} is invalid", entry.Key, entry.Value) + } + + if key, duplicate := containsDuplicateKey(entries...); duplicate { + return nil, fmt.Errorf("contains duplicate keys (%s)", key) + } + + tracestate := Tracestate{} + + if parent != nil && len(parent.entries) > 0 { + tracestate.entries = append([]Entry{}, parent.entries...) + } + + err := tracestate.add(entries) + if err != nil { + return nil, err + } + return &tracestate, nil +} diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/ioctl_signed.go b/ecs-agent/vendor/golang.org/x/sys/unix/ioctl_signed.go new file mode 100644 index 00000000000..7def9580e6f --- /dev/null +++ b/ecs-agent/vendor/golang.org/x/sys/unix/ioctl_signed.go @@ -0,0 +1,70 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || solaris +// +build aix solaris + +package unix + +import ( + "unsafe" +) + +// ioctl itself should not be exposed directly, but additional get/set +// functions for specific types are permissible. + +// IoctlSetInt performs an ioctl operation which sets an integer value +// on fd, using the specified request number. +func IoctlSetInt(fd int, req int, value int) error { + return ioctl(fd, req, uintptr(value)) +} + +// IoctlSetPointerInt performs an ioctl operation which sets an +// integer value on fd, using the specified request number. The ioctl +// argument is called with a pointer to the integer value, rather than +// passing the integer value directly. +func IoctlSetPointerInt(fd int, req int, value int) error { + v := int32(value) + return ioctlPtr(fd, req, unsafe.Pointer(&v)) +} + +// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. +// +// To change fd's window size, the req argument should be TIOCSWINSZ. +func IoctlSetWinsize(fd int, req int, value *Winsize) error { + // TODO: if we get the chance, remove the req parameter and + // hardcode TIOCSWINSZ. + return ioctlPtr(fd, req, unsafe.Pointer(value)) +} + +// IoctlSetTermios performs an ioctl on fd with a *Termios. +// +// The req value will usually be TCSETA or TIOCSETA. +func IoctlSetTermios(fd int, req int, value *Termios) error { + // TODO: if we get the chance, remove the req parameter. + return ioctlPtr(fd, req, unsafe.Pointer(value)) +} + +// IoctlGetInt performs an ioctl operation which gets an integer value +// from fd, using the specified request number. +// +// A few ioctl requests use the return value as an output parameter; +// for those, IoctlRetInt should be used instead of this function. 
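A small usage sketch for the tracestate constructor added above, assuming the package import path vendored by this patch; the vendor key "rojo" and its value are made-up examples, and error handling is abbreviated.

// Editor's sketch only; not part of the vendored file.
package main

import (
	"fmt"

	"go.opencensus.io/trace/tracestate"
)

func main() {
	// Build a Tracestate with no parent and one vendor entry. Keys must match
	// the lowercase key grammar validated by keyValidationRegExp.
	ts, err := tracestate.New(nil, tracestate.Entry{Key: "rojo", Value: "00f067aa0ba902b7"})
	if err != nil {
		panic(err)
	}

	// Entries passed to New precede any entries copied from the parent.
	for _, e := range ts.Entries() {
		fmt.Printf("%s=%s\n", e.Key, e.Value)
	}
}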
+func IoctlGetInt(fd int, req int) (int, error) { + var value int + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return value, err +} + +func IoctlGetWinsize(fd int, req int) (*Winsize, error) { + var value Winsize + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return &value, err +} + +func IoctlGetTermios(fd int, req int) (*Termios, error) { + var value Termios + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return &value, err +} diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/ioctl.go b/ecs-agent/vendor/golang.org/x/sys/unix/ioctl_unsigned.go similarity index 92% rename from ecs-agent/vendor/golang.org/x/sys/unix/ioctl.go rename to ecs-agent/vendor/golang.org/x/sys/unix/ioctl_unsigned.go index 7ce8dd406ff..649913d1ea7 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/ioctl.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/ioctl_unsigned.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd hurd linux netbsd openbsd solaris +//go:build darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd +// +build darwin dragonfly freebsd hurd linux netbsd openbsd package unix diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/ioctl_zos.go b/ecs-agent/vendor/golang.org/x/sys/unix/ioctl_zos.go index 6532f09af2e..cdc21bf76dc 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/ioctl_zos.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/ioctl_zos.go @@ -17,14 +17,14 @@ import ( // IoctlSetInt performs an ioctl operation which sets an integer value // on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { +func IoctlSetInt(fd int, req int, value int) error { return ioctl(fd, req, uintptr(value)) } // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. // // To change fd's window size, the req argument should be TIOCSWINSZ. -func IoctlSetWinsize(fd int, req uint, value *Winsize) error { +func IoctlSetWinsize(fd int, req int, value *Winsize) error { // TODO: if we get the chance, remove the req parameter and // hardcode TIOCSWINSZ. return ioctlPtr(fd, req, unsafe.Pointer(value)) @@ -33,7 +33,7 @@ func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // IoctlSetTermios performs an ioctl on fd with a *Termios. // // The req value is expected to be TCSETS, TCSETSW, or TCSETSF -func IoctlSetTermios(fd int, req uint, value *Termios) error { +func IoctlSetTermios(fd int, req int, value *Termios) error { if (req != TCSETS) && (req != TCSETSW) && (req != TCSETSF) { return ENOSYS } @@ -47,13 +47,13 @@ func IoctlSetTermios(fd int, req uint, value *Termios) error { // // A few ioctl requests use the return value as an output parameter; // for those, IoctlRetInt should be used instead of this function. -func IoctlGetInt(fd int, req uint) (int, error) { +func IoctlGetInt(fd int, req int) (int, error) { var value int err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return value, err } -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { +func IoctlGetWinsize(fd int, req int) (*Winsize, error) { var value Winsize err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err @@ -62,7 +62,7 @@ func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { // IoctlGetTermios performs an ioctl on fd with a *Termios. 
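As a rough illustration of the typed ioctl helpers above, here is a hedged sketch that queries the terminal window size; it assumes a Linux or Darwin host where stdout is a terminal, and uses only calls already present in golang.org/x/sys/unix.

// Editor's sketch only; not part of the vendored file.
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// IoctlGetWinsize wraps ioctl(2) with a *Winsize result; TIOCGWINSZ asks
	// the terminal driver for the current window size.
	ws, err := unix.IoctlGetWinsize(int(os.Stdout.Fd()), unix.TIOCGWINSZ)
	if err != nil {
		fmt.Fprintln(os.Stderr, "stdout is not a terminal:", err)
		return
	}
	fmt.Printf("%d rows x %d cols\n", ws.Row, ws.Col)
}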
// // The req value is expected to be TCGETS -func IoctlGetTermios(fd int, req uint) (*Termios, error) { +func IoctlGetTermios(fd int, req int) (*Termios, error) { var value Termios if req != TCGETS { return &value, ENOSYS diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/mkerrors.sh b/ecs-agent/vendor/golang.org/x/sys/unix/mkerrors.sh index 7456d9ddde1..2045d3dadb8 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/ecs-agent/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -66,6 +66,7 @@ includes_Darwin=' #include #include #include +#include #include #include #include @@ -521,6 +522,7 @@ ccflags="$@" $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || $2 ~ /^RAW_PAYLOAD_/ || + $2 ~ /^[US]F_/ || $2 ~ /^TP_STATUS_/ || $2 ~ /^FALLOC_/ || $2 ~ /^ICMPV?6?_(FILTER|SEC)/ || diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_aix.go b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_aix.go index d9f5544ccf4..c406ae00f41 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -408,8 +408,8 @@ func (w WaitStatus) CoreDump() bool { return w&0x80 == 0x80 } func (w WaitStatus) TrapCause() int { return -1 } -//sys ioctl(fd int, req uint, arg uintptr) (err error) -//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = ioctl +//sys ioctl(fd int, req int, arg uintptr) (err error) +//sys ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) = ioctl // fcntl must never be called with cmd=F_DUP2FD because it doesn't work on AIX // There is no way to create a custom fcntl and to keep //sys fcntl easily, diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go index e92a0be1630..f2871fa9535 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -8,7 +8,6 @@ package unix //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) = getrlimit64 -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) = setrlimit64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = lseek64 //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go index 16eed17098e..75718ec0f19 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -8,7 +8,6 @@ package unix //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = lseek //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) = mmap64 diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_darwin.go b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_darwin.go index 7064d6ebab6..206921504cb 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -613,6 +613,7 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n 
int, err error) +//sys Setattrlist(path string, attrlist *Attrlist, attrBuf []byte, options int) (err error) //sys Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) @@ -622,7 +623,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys Setprivexec(flag int) (err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setreuid(ruid int, euid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) @@ -676,7 +676,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { // Kqueue_from_portset_np // Kqueue_portset // Getattrlist -// Setattrlist // Getdirentriesattr // Searchfs // Delete diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 221efc26bcd..d4ce988e72f 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -326,7 +326,6 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sysnb Setreuid(ruid int, euid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 5bdde03e4a8..afb10106f6e 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -433,7 +433,6 @@ func Dup3(oldfd, newfd, flags int) error { //sysnb Setreuid(ruid int, euid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux.go b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux.go index 9735331530a..fbaeb5fff14 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1873,7 +1873,6 @@ func Getpgrp() (pid int) { //sys OpenTree(dfd int, fileName string, flags uint) (r int, err error) //sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT -//sysnb Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 //sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) //sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6 //sys read(fd int, p []byte) (n int, err error) @@ -1887,6 +1886,15 @@ func Getpgrp() (pid int) { //sysnb Settimeofday(tv *Timeval) (err error) //sys Setns(fd int, nstype int) (err error) +//go:linkname syscall_prlimit syscall.prlimit +func syscall_prlimit(pid, resource int, newlimit, old *syscall.Rlimit) error + +func Prlimit(pid, resource int, 
newlimit, old *Rlimit) error { + // Just call the syscall version, because as of Go 1.21 + // it will affect starting a new process. + return syscall_prlimit(pid, resource, (*syscall.Rlimit)(newlimit), (*syscall.Rlimit)(old)) +} + // PrctlRetInt performs a prctl operation specified by option and further // optional arguments arg2 through arg5 depending on option. It returns a // non-negative integer that is returned by the prctl syscall. diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_386.go index ff5b5899d6d..c7d9945ea19 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -97,33 +97,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - - rl := rlimit32{} - if rlim.Cur == rlimInf64 { - rl.Cur = rlimInf32 - } else if rlim.Cur < uint64(rlimInf32) { - rl.Cur = uint32(rlim.Cur) - } else { - return EINVAL - } - if rlim.Max == rlimInf64 { - rl.Max = rlimInf32 - } else if rlim.Max < uint64(rlimInf32) { - rl.Max = uint32(rlim.Max) - } else { - return EINVAL - } - - return setrlimit(resource, &rl) -} - func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { newoffset, errno := seek(fd, offset, whence) if errno != 0 { diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 9b270353298..5b21fcfd753 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -46,7 +46,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 856ad1d635c..da2986415ae 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -171,33 +171,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - - rl := rlimit32{} - if rlim.Cur == rlimInf64 { - rl.Cur = rlimInf32 - } else if rlim.Cur < uint64(rlimInf32) { - rl.Cur = uint32(rlim.Cur) - } else { - return EINVAL - } - if rlim.Max == rlimInf64 { - rl.Max = rlimInf32 - } else if rlim.Max < uint64(rlimInf32) { - rl.Max = uint32(rlim.Max) - } else { - return EINVAL - } - - return setrlimit(resource, &rl) -} - func (r *PtraceRegs) PC() uint64 { return uint64(r.Uregs[15]) } func (r *PtraceRegs) SetPC(pc uint64) { r.Uregs[15] = uint32(pc) } diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go 
b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 6422704bc52..a81f5742b8a 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -39,7 +39,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) @@ -143,15 +142,6 @@ func Getrlimit(resource int, rlim *Rlimit) error { return getrlimit(resource, rlim) } -// Setrlimit prefers the prlimit64 system call. See issue 38604. -func Setrlimit(resource int, rlim *Rlimit) error { - err := Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - return setrlimit(resource, rlim) -} - func (r *PtraceRegs) PC() uint64 { return r.Pc } func (r *PtraceRegs) SetPC(pc uint64) { r.Pc = pc } diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index 59dab510e97..69d2d7c3db7 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -126,11 +126,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - return -} - func futimesat(dirfd int, path string, tv *[2]Timeval) (err error) { if tv == nil { return utimensat(dirfd, path, nil, 0) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index bfef09a39eb..76d564095ef 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -37,7 +37,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Statfs(path string, buf *Statfs_t) (err error) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index ab302509663..aae7f0ffd3f 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -151,33 +151,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - - rl := rlimit32{} - if rlim.Cur == rlimInf64 { - rl.Cur = rlimInf32 - } else if rlim.Cur < uint64(rlimInf32) { - rl.Cur = uint32(rlim.Cur) - } else { - return EINVAL - } - if rlim.Max == rlimInf64 { - rl.Max = rlimInf32 - } else if rlim.Max < uint64(rlimInf32) { - rl.Max = 
uint32(rlim.Max) - } else { - return EINVAL - } - - return setrlimit(resource, &rl) -} - func (r *PtraceRegs) PC() uint64 { return r.Epc } func (r *PtraceRegs) SetPC(pc uint64) { r.Epc = pc } diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go index eac1cf1acc8..66eff19a320 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -159,33 +159,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - - rl := rlimit32{} - if rlim.Cur == rlimInf64 { - rl.Cur = rlimInf32 - } else if rlim.Cur < uint64(rlimInf32) { - rl.Cur = uint32(rlim.Cur) - } else { - return EINVAL - } - if rlim.Max == rlimInf64 { - rl.Max = rlimInf32 - } else if rlim.Max < uint64(rlimInf32) { - rl.Max = uint32(rlim.Max) - } else { - return EINVAL - } - - return setrlimit(resource, &rl) -} - func (r *PtraceRegs) PC() uint32 { return r.Nip } func (r *PtraceRegs) SetPC(pc uint32) { r.Nip = pc } diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 4df56616b8f..806aa2574d8 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -34,7 +34,6 @@ package unix //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 5f4243dea2c..35851ef70b8 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -38,7 +38,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index d0a7d406685..2f89e8f5def 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -34,7 +34,6 @@ import ( //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat 
*Stat_t) (err error) //sys Statfs(path string, buf *Statfs_t) (err error) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index f5c793be26d..7ca064ae764 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -31,7 +31,6 @@ package unix //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_netbsd.go index e66865dccbe..018d7d47822 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -340,7 +340,6 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { //sys Setpriority(which int, who int, prio int) (err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setreuid(ruid int, euid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) @@ -501,7 +500,6 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { // compat_43_osendmsg // compat_43_osethostid // compat_43_osethostname -// compat_43_osetrlimit // compat_43_osigblock // compat_43_osigsetmask // compat_43_osigstack diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 5e9de23ae37..f9c7a9663c6 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -294,7 +294,6 @@ func Uname(uname *Utsname) error { //sysnb Setreuid(ruid int, euid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setrtable(rtable int) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_solaris.go b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_solaris.go index d3444b64d6d..b600a289d33 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -545,24 +545,24 @@ func Minor(dev uint64) uint32 { * Expose the ioctl function */ -//sys ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) = libc.ioctl -//sys ioctlPtrRet(fd int, req uint, arg unsafe.Pointer) (ret int, err error) = libc.ioctl +//sys ioctlRet(fd int, req int, arg uintptr) (ret int, err error) = libc.ioctl +//sys ioctlPtrRet(fd int, req int, arg unsafe.Pointer) (ret int, err error) = libc.ioctl -func ioctl(fd int, req uint, arg uintptr) (err error) { +func ioctl(fd int, req int, arg uintptr) (err error) { _, err = ioctlRet(fd, req, arg) return err } -func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { _, err = ioctlPtrRet(fd, req, arg) 
return err } -func IoctlSetTermio(fd int, req uint, value *Termio) error { +func IoctlSetTermio(fd int, req int, value *Termio) error { return ioctlPtr(fd, req, unsafe.Pointer(value)) } -func IoctlGetTermio(fd int, req uint) (*Termio, error) { +func IoctlGetTermio(fd int, req int) (*Termio, error) { var value Termio err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err @@ -665,7 +665,6 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Setpriority(which int, who int, prio int) (err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setreuid(ruid int, euid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Setuid(uid int) (err error) //sys Shutdown(s int, how int) (err error) = libsocket.shutdown @@ -1080,11 +1079,11 @@ func Getmsg(fd int, cl []byte, data []byte) (retCl []byte, retData []byte, flags return retCl, retData, flags, nil } -func IoctlSetIntRetInt(fd int, req uint, arg int) (int, error) { +func IoctlSetIntRetInt(fd int, req int, arg int) (int, error) { return ioctlRet(fd, req, uintptr(arg)) } -func IoctlSetString(fd int, req uint, val string) error { +func IoctlSetString(fd int, req int, val string) error { bs := make([]byte, len(val)+1) copy(bs[:len(bs)-1], val) err := ioctlPtr(fd, req, unsafe.Pointer(&bs[0])) @@ -1120,7 +1119,7 @@ func (l *Lifreq) GetLifruUint() uint { return *(*uint)(unsafe.Pointer(&l.Lifru[0])) } -func IoctlLifreq(fd int, req uint, l *Lifreq) error { +func IoctlLifreq(fd int, req int, l *Lifreq) error { return ioctlPtr(fd, req, unsafe.Pointer(l)) } @@ -1131,6 +1130,6 @@ func (s *Strioctl) SetInt(i int) { s.Dp = (*int8)(unsafe.Pointer(&i)) } -func IoctlSetStrioctlRetInt(fd int, req uint, s *Strioctl) (int, error) { +func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) { return ioctlPtrRet(fd, req, unsafe.Pointer(s)) } diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_unix.go b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_unix.go index 00f0aa37588..8e48c29ec33 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -587,3 +587,10 @@ func emptyIovecs(iov []Iovec) bool { } return true } + +// Setrlimit sets a resource limit. +func Setrlimit(resource int, rlim *Rlimit) error { + // Just call the syscall version, because as of Go 1.21 + // it will affect starting a new process. 
+ return syscall.Setrlimit(resource, (*syscall.Rlimit)(rlim)) +} diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index b295497ae47..d3d49ec3ed7 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -212,8 +212,8 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = SYS___SENDMSG_A //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) = SYS_MMAP //sys munmap(addr uintptr, length uintptr) (err error) = SYS_MUNMAP -//sys ioctl(fd int, req uint, arg uintptr) (err error) = SYS_IOCTL -//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL +//sys ioctl(fd int, req int, arg uintptr) (err error) = SYS_IOCTL +//sys ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) = SYS_IOCTL //sys Access(path string, mode uint32) (err error) = SYS___ACCESS_A //sys Chdir(path string) (err error) = SYS___CHDIR_A diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/ecs-agent/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 476a1c7e77c..14300762715 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -1270,6 +1270,16 @@ const ( SEEK_END = 0x2 SEEK_HOLE = 0x3 SEEK_SET = 0x0 + SF_APPEND = 0x40000 + SF_ARCHIVED = 0x10000 + SF_DATALESS = 0x40000000 + SF_FIRMLINK = 0x800000 + SF_IMMUTABLE = 0x20000 + SF_NOUNLINK = 0x100000 + SF_RESTRICTED = 0x80000 + SF_SETTABLE = 0x3fff0000 + SF_SUPPORTED = 0x9f0000 + SF_SYNTHETIC = 0xc0000000 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1543,6 +1553,15 @@ const ( TIOCTIMESTAMP = 0x40107459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 + UF_APPEND = 0x4 + UF_COMPRESSED = 0x20 + UF_DATAVAULT = 0x80 + UF_HIDDEN = 0x8000 + UF_IMMUTABLE = 0x2 + UF_NODUMP = 0x1 + UF_OPAQUE = 0x8 + UF_SETTABLE = 0xffff + UF_TRACKED = 0x40 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/ecs-agent/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index e36f5178d60..ab044a74274 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -1270,6 +1270,16 @@ const ( SEEK_END = 0x2 SEEK_HOLE = 0x3 SEEK_SET = 0x0 + SF_APPEND = 0x40000 + SF_ARCHIVED = 0x10000 + SF_DATALESS = 0x40000000 + SF_FIRMLINK = 0x800000 + SF_IMMUTABLE = 0x20000 + SF_NOUNLINK = 0x100000 + SF_RESTRICTED = 0x80000 + SF_SETTABLE = 0x3fff0000 + SF_SUPPORTED = 0x9f0000 + SF_SYNTHETIC = 0xc0000000 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1543,6 +1553,15 @@ const ( TIOCTIMESTAMP = 0x40107459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 + UF_APPEND = 0x4 + UF_COMPRESSED = 0x20 + UF_DATAVAULT = 0x80 + UF_HIDDEN = 0x8000 + UF_IMMUTABLE = 0x2 + UF_NODUMP = 0x1 + UF_OPAQUE = 0x8 + UF_SETTABLE = 0xffff + UF_TRACKED = 0x40 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index ef9dcd1bef8..9a257219d70 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -124,7 +124,6 @@ int utime(uintptr_t, uintptr_t); unsigned long long getsystemcfg(int); int umount(uintptr_t); 
int getrlimit64(int, uintptr_t); -int setrlimit64(int, uintptr_t); long long lseek64(int, long long, int); uintptr_t mmap(uintptr_t, uintptr_t, int, int, int, long long); @@ -213,7 +212,7 @@ func wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { +func ioctl(fd int, req int, arg uintptr) (err error) { r0, er := C.ioctl(C.int(fd), C.int(req), C.uintptr_t(arg)) if r0 == -1 && er != nil { err = er @@ -223,7 +222,7 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { r0, er := C.ioctl(C.int(fd), C.int(req), C.uintptr_t(uintptr(arg))) if r0 == -1 && er != nil { err = er @@ -1464,16 +1463,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - r0, er := C.setrlimit64(C.int(resource), C.uintptr_t(uintptr(unsafe.Pointer(rlim)))) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Seek(fd int, offset int64, whence int) (off int64, err error) { r0, er := C.lseek64(C.int(fd), C.longlong(offset), C.int(whence)) off = int64(r0) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index f86a9459234..6de80c20cf2 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -93,8 +93,8 @@ func wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, e1 := callioctl(fd, int(req), arg) +func ioctl(fd int, req int, arg uintptr) (err error) { + _, e1 := callioctl(fd, req, arg) if e1 != 0 { err = errnoErr(e1) } @@ -103,8 +103,8 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { - _, e1 := callioctl_ptr(fd, int(req), arg) +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { + _, e1 := callioctl_ptr(fd, req, arg) if e1 != 0 { err = errnoErr(e1) } @@ -1422,16 +1422,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, e1 := callsetrlimit(resource, uintptr(unsafe.Pointer(rlim))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Seek(fd int, offset int64, whence int) (off int64, err error) { r0, e1 := calllseek(fd, offset, whence) off = int64(r0) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index d32a84cae27..c4d50ae5005 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -124,7 +124,6 @@ import ( //go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" //go:cgo_import_dynamic libc_umount 
umount "libc.a/shr_64.o" //go:cgo_import_dynamic libc_getrlimit getrlimit "libc.a/shr_64.o" -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.a/shr_64.o" //go:cgo_import_dynamic libc_lseek lseek "libc.a/shr_64.o" //go:cgo_import_dynamic libc_mmap64 mmap64 "libc.a/shr_64.o" @@ -242,7 +241,6 @@ import ( //go:linkname libc_getsystemcfg libc_getsystemcfg //go:linkname libc_umount libc_umount //go:linkname libc_getrlimit libc_getrlimit -//go:linkname libc_setrlimit libc_setrlimit //go:linkname libc_lseek libc_lseek //go:linkname libc_mmap64 libc_mmap64 @@ -363,7 +361,6 @@ var ( libc_getsystemcfg, libc_umount, libc_getrlimit, - libc_setrlimit, libc_lseek, libc_mmap64 syscallFunc ) @@ -1179,13 +1176,6 @@ func callgetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callsetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { - r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_setrlimit)), 2, uintptr(resource), rlim, 0, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func calllseek(fd int, offset int64, whence int) (r1 uintptr, e1 Errno) { r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_lseek)), 3, uintptr(fd), uintptr(offset), uintptr(whence), 0, 0, 0) return diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go index d7d8baf819c..6903d3b09e3 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -123,7 +123,6 @@ int utime(uintptr_t, uintptr_t); unsigned long long getsystemcfg(int); int umount(uintptr_t); int getrlimit(int, uintptr_t); -int setrlimit(int, uintptr_t); long long lseek(int, long long, int); uintptr_t mmap64(uintptr_t, uintptr_t, int, int, int, long long); @@ -131,6 +130,7 @@ uintptr_t mmap64(uintptr_t, uintptr_t, int, int, int, long long); import "C" import ( "syscall" + "unsafe" ) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1055,14 +1055,6 @@ func callgetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callsetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { - r1 = uintptr(C.setrlimit(C.int(resource), C.uintptr_t(rlim))) - e1 = syscall.GetErrno() - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func calllseek(fd int, offset int64, whence int) (r1 uintptr, e1 Errno) { r1 = uintptr(C.lseek(C.int(fd), C.longlong(offset), C.int(whence))) e1 = syscall.GetErrno() diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index a29ffdd566d..4037ccf7a94 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -1992,6 +1992,31 @@ var libc_select_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Setattrlist(path string, attrlist *Attrlist, attrBuf []byte, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(attrBuf) > 0 { + _p1 = unsafe.Pointer(&attrBuf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 
uintptr(unsafe.Pointer(attrlist)), uintptr(_p1), uintptr(len(attrBuf)), uintptr(options), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setattrlist_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setegid(egid int) (err error) { _, _, e1 := syscall_syscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { @@ -2123,20 +2148,6 @@ var libc_setreuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 95fe4c0eb96..4baaed0bc12 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -705,6 +705,11 @@ TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) +TEXT libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setattrlist(SB) +GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) + TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) @@ -759,12 +764,6 @@ TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) - -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 2fd4590bb78..51d6f3fb256 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -1992,6 +1992,31 @@ var libc_select_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Setattrlist(path string, attrlist *Attrlist, attrBuf []byte, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(attrBuf) > 0 { + _p1 = unsafe.Pointer(&attrBuf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(attrlist)), uintptr(_p1), uintptr(len(attrBuf)), uintptr(options), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setattrlist_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" + 
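The per-platform Setrlimit wrappers removed throughout this patch are replaced by the single syscall-backed unix.Setrlimit (and unix.Prlimit on Linux) shown earlier. A hedged usage sketch, assuming a Linux host where raising the soft limit up to the hard limit is permitted:

// Editor's sketch only; not part of the vendored file.
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var lim unix.Rlimit
	if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &lim); err != nil {
		panic(err)
	}

	// Raise the soft open-file limit to the hard limit. Per the comments added
	// in this patch, going through the syscall package also affects processes
	// started afterwards (as of Go 1.21).
	lim.Cur = lim.Max
	if err := unix.Setrlimit(unix.RLIMIT_NOFILE, &lim); err != nil {
		panic(err)
	}
	fmt.Println("RLIMIT_NOFILE soft limit:", lim.Cur)
}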
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setegid(egid int) (err error) { _, _, e1 := syscall_syscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { @@ -2123,20 +2148,6 @@ var libc_setreuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index efa5b4c987c..c3b82c03793 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -705,6 +705,11 @@ TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) +TEXT libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setattrlist(SB) +GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) + TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) @@ -759,12 +764,6 @@ TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) - -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 3b85134707e..0eabac7ade2 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -1410,16 +1410,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 1129065624e..ee313eb0073 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -1645,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), 
uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 55f5abfe599..4c986e448ee 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -1645,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index d39651c2b58..555216944a0 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -1645,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index ddb74086801..67a226fbf5e 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -1645,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go index 09a53a616c0..f0b9ddaaa26 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -1645,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := 
RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 430cb24de7e..da63d9d7822 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1346,16 +1346,6 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) if e1 != 0 { diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index c81b0ad4777..07b549cc25e 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -411,16 +411,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 2206bce7f4d..5f481bf83f4 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -334,16 +334,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index edf6b39f161..824cd52c7fa 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -578,16 +578,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func armSyncFileRange(fd int, flags int, off int64, n int64) (err error) { 
_, _, e1 := Syscall6(SYS_ARM_SYNC_FILE_RANGE, uintptr(fd), uintptr(flags), uintptr(off), uintptr(off>>32), uintptr(n), uintptr(n>>32)) if e1 != 0 { diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 190609f2140..e77aecfe985 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -289,16 +289,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 5f984cbb1ca..961a3afb7b7 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -644,16 +644,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Alarm(seconds uint) (remaining uint, err error) { r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) remaining = uint(r0) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 46fc380a40e..ed05005e91b 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -278,16 +278,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index cbd0d4dadba..d365b718f30 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -278,16 +278,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if 
e1 != 0 { diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 0c13d15f07c..c3f1b8bbde0 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -644,16 +644,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Alarm(seconds uint) (remaining uint, err error) { r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) remaining = uint(r0) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go index e01432aed51..a6574cf98b1 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go @@ -624,16 +624,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func syncFileRange2(fd int, flags int, off int64, n int64) (err error) { _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(flags), uintptr(off>>32), uintptr(off), uintptr(n>>32), uintptr(n)) if e1 != 0 { diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 13c7ee7baff..f40990264f4 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -349,16 +349,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 02d0c0fd61e..9dfcc29974f 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -349,16 +349,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git 
a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 9fee3b1d239..0b29239583b 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -269,16 +269,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 647bbfecd6a..6cde32237dc 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -319,16 +319,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) n = int64(r0) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index ada057f8914..5253d65bf1b 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -329,16 +329,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 8e1d9c8f666..cdb2af5ae0f 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -1607,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git 
a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 21c6950400e..9d25f76b0bf 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -1607,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 298168f90a1..d3f8035169f 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -1607,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 68b8bd492fe..887188a529e 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -1607,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 0b0f910e1ab..6699a783e1f 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -1894,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s 
b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 087444250c9..04f0de34b2e 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 48ff5de75b5..1e775fe0571 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -1894,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 5782cd10844..27b6f4df74f 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 2452a641dae..7f6427899a5 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -1894,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := 
syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index cf310420c94..b797045fd2d 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 5e35600a60c..756ef7b1736 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -1894,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index 484bb42e0a8..a871266221e 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index b04cef1a198..7bc2e24eb95 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -1894,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic 
libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index 55af27263ad..05d4bffd791 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 47a07ee0c27..739be6217a3 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -1894,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index 4028255b0d5..74a25f8d643 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -687,12 +687,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - CALL libc_setrlimit(SB) - RET -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_setrtable(SB) RET diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 573378fdb96..7d95a197803 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -1894,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) 
- if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index e1fbd4dfa8c..990be245740 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 4873a1e5d3e..609d1c598a8 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -110,7 +110,6 @@ import ( //go:cgo_import_dynamic libc_setpriority setpriority "libc.so" //go:cgo_import_dynamic libc_setregid setregid "libc.so" //go:cgo_import_dynamic libc_setreuid setreuid "libc.so" -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" //go:cgo_import_dynamic libc_setsid setsid "libc.so" //go:cgo_import_dynamic libc_setuid setuid "libc.so" //go:cgo_import_dynamic libc_shutdown shutdown "libsocket.so" @@ -250,7 +249,6 @@ import ( //go:linkname procSetpriority libc_setpriority //go:linkname procSetregid libc_setregid //go:linkname procSetreuid libc_setreuid -//go:linkname procSetrlimit libc_setrlimit //go:linkname procSetsid libc_setsid //go:linkname procSetuid libc_setuid //go:linkname procshutdown libc_shutdown @@ -391,7 +389,6 @@ var ( procSetpriority, procSetregid, procSetreuid, - procSetrlimit, procSetsid, procSetuid, procshutdown, @@ -646,7 +643,7 @@ func __minor(version int, dev uint64) (val uint) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) { +func ioctlRet(fd int, req int, arg uintptr) (ret int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) ret = int(r0) if e1 != 0 { @@ -657,7 +654,7 @@ func ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctlPtrRet(fd int, req uint, arg unsafe.Pointer) (ret int, err error) { +func ioctlPtrRet(fd int, req int, arg unsafe.Pointer) (ret int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) ret = int(r0) if e1 != 0 { @@ -1650,16 +1647,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := 
rawSysvicall6(uintptr(unsafe.Pointer(&procSetrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetsid)), 0, 0, 0, 0, 0, 0, 0) pid = int(r0) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index 07bfe2ef9ad..c31681743c7 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -257,7 +257,7 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { +func ioctl(fd int, req int, arg uintptr) (err error) { _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) @@ -267,7 +267,7 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/ecs-agent/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index e2a64f0991a..690cefc3d06 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -151,6 +151,16 @@ type Dirent struct { _ [3]byte } +type Attrlist struct { + Bitmapcount uint16 + Reserved uint16 + Commonattr uint32 + Volattr uint32 + Dirattr uint32 + Fileattr uint32 + Forkattr uint32 +} + const ( PathMax = 0x400 ) @@ -610,6 +620,7 @@ const ( AT_REMOVEDIR = 0x80 AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 0x20 + AT_EACCESS = 0x10 ) type PollFd struct { diff --git a/ecs-agent/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/ecs-agent/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 34aa775219f..5bffc10eac0 100644 --- a/ecs-agent/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/ecs-agent/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -151,6 +151,16 @@ type Dirent struct { _ [3]byte } +type Attrlist struct { + Bitmapcount uint16 + Reserved uint16 + Commonattr uint32 + Volattr uint32 + Dirattr uint32 + Fileattr uint32 + Forkattr uint32 +} + const ( PathMax = 0x400 ) @@ -610,6 +620,7 @@ const ( AT_REMOVEDIR = 0x80 AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 0x20 + AT_EACCESS = 0x10 ) type PollFd struct { diff --git a/ecs-agent/vendor/golang.org/x/sys/windows/types_windows.go b/ecs-agent/vendor/golang.org/x/sys/windows/types_windows.go index 857acf1032d..0dbb2084117 100644 --- a/ecs-agent/vendor/golang.org/x/sys/windows/types_windows.go +++ b/ecs-agent/vendor/golang.org/x/sys/windows/types_windows.go @@ -2229,10 +2229,10 @@ const ( JobObjectExtendedLimitInformation = 9 JobObjectGroupInformation = 11 JobObjectGroupInformationEx = 14 - JobObjectLimitViolationInformation2 = 35 + JobObjectLimitViolationInformation2 = 34 JobObjectNetRateControlInformation = 32 JobObjectNotificationLimitInformation = 12 - JobObjectNotificationLimitInformation2 = 34 + JobObjectNotificationLimitInformation2 = 33 JobObjectSecurityLimitInformation = 5 ) diff 
--git a/ecs-agent/vendor/golang.org/x/tools/internal/typeparams/common.go b/ecs-agent/vendor/golang.org/x/tools/internal/typeparams/common.go index 25a1426d30e..cfba8189f15 100644 --- a/ecs-agent/vendor/golang.org/x/tools/internal/typeparams/common.go +++ b/ecs-agent/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -87,7 +87,6 @@ func IsTypeParam(t types.Type) bool { func OriginMethod(fn *types.Func) *types.Func { recv := fn.Type().(*types.Signature).Recv() if recv == nil { - return fn } base := recv.Type() diff --git a/ecs-agent/vendor/modules.txt b/ecs-agent/vendor/modules.txt index bfde5c6ab48..71b138313b9 100644 --- a/ecs-agent/vendor/modules.txt +++ b/ecs-agent/vendor/modules.txt @@ -1,3 +1,31 @@ +# github.com/Microsoft/go-winio v0.5.2 +## explicit; go 1.13 +github.com/Microsoft/go-winio +github.com/Microsoft/go-winio/pkg/guid +github.com/Microsoft/go-winio/pkg/security +github.com/Microsoft/go-winio/vhd +# github.com/Microsoft/hcsshim v0.9.9 +## explicit; go 1.13 +github.com/Microsoft/hcsshim/computestorage +github.com/Microsoft/hcsshim/hcn +github.com/Microsoft/hcsshim/internal/cni +github.com/Microsoft/hcsshim/internal/cow +github.com/Microsoft/hcsshim/internal/hcs +github.com/Microsoft/hcsshim/internal/hcs/schema1 +github.com/Microsoft/hcsshim/internal/hcs/schema2 +github.com/Microsoft/hcsshim/internal/hcserror +github.com/Microsoft/hcsshim/internal/interop +github.com/Microsoft/hcsshim/internal/jobobject +github.com/Microsoft/hcsshim/internal/log +github.com/Microsoft/hcsshim/internal/logfields +github.com/Microsoft/hcsshim/internal/oc +github.com/Microsoft/hcsshim/internal/queue +github.com/Microsoft/hcsshim/internal/regstate +github.com/Microsoft/hcsshim/internal/runhcs +github.com/Microsoft/hcsshim/internal/timeout +github.com/Microsoft/hcsshim/internal/vmcompute +github.com/Microsoft/hcsshim/internal/winapi +github.com/Microsoft/hcsshim/osversion # github.com/aws/aws-sdk-go v1.36.0 ## explicit; go 1.11 github.com/aws/aws-sdk-go/aws @@ -35,6 +63,24 @@ github.com/cihub/seelog/archive/zip # github.com/container-storage-interface/spec v1.8.0 ## explicit; go 1.18 github.com/container-storage-interface/spec/lib/go/csi +# github.com/containerd/cgroups v1.0.4 +## explicit; go 1.17 +github.com/containerd/cgroups/stats/v1 +# github.com/containernetworking/cni v1.1.2 +## explicit; go 1.14 +github.com/containernetworking/cni/libcni +github.com/containernetworking/cni/pkg/invoke +github.com/containernetworking/cni/pkg/types +github.com/containernetworking/cni/pkg/types/020 +github.com/containernetworking/cni/pkg/types/040 +github.com/containernetworking/cni/pkg/types/100 +github.com/containernetworking/cni/pkg/types/create +github.com/containernetworking/cni/pkg/types/internal +github.com/containernetworking/cni/pkg/utils +github.com/containernetworking/cni/pkg/version +# github.com/containernetworking/plugins v0.9.1 +## explicit; go 1.14 +github.com/containernetworking/plugins/pkg/ns # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew @@ -68,8 +114,13 @@ github.com/docker/go-units github.com/go-logr/logr # github.com/gogo/protobuf v1.3.2 ## explicit; go 1.15 +github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/proto -# github.com/golang/mock v1.4.1 +github.com/gogo/protobuf/protoc-gen-gogo/descriptor +# github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da +## explicit +github.com/golang/groupcache/lru +# github.com/golang/mock v1.6.0 ## explicit; go 1.11 github.com/golang/mock/gomock github.com/golang/mock/mockgen/model @@ -130,10 
+181,20 @@ github.com/prometheus/client_model/go github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model -# github.com/stretchr/testify v1.7.0 +# github.com/sirupsen/logrus v1.9.0 +## explicit; go 1.13 +github.com/sirupsen/logrus +# github.com/stretchr/testify v1.8.1 ## explicit; go 1.13 github.com/stretchr/testify/assert github.com/stretchr/testify/require +# go.opencensus.io v0.24.0 +## explicit; go 1.13 +go.opencensus.io +go.opencensus.io/internal +go.opencensus.io/trace +go.opencensus.io/trace/internal +go.opencensus.io/trace/tracestate # golang.org/x/mod v0.10.0 ## explicit; go 1.17 golang.org/x/mod/internal/lazyregexp @@ -149,7 +210,7 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/sys v0.6.0 +# golang.org/x/sys v0.7.0 ## explicit; go 1.17 golang.org/x/sys/execabs golang.org/x/sys/internal/unsafeheader @@ -166,7 +227,7 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.3.0 ## explicit golang.org/x/time/rate -# golang.org/x/tools v0.6.0 +# golang.org/x/tools v0.7.0 ## explicit; go 1.18 golang.org/x/tools/go/ast/astutil golang.org/x/tools/imports